filename | text
---|---|
the-stack_0_9526 | # coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for the fairness example."""
import functools
import jax
import jax.numpy as jnp
import ott
def binary_cross_entropy(logits, labels):
return jnp.sum(
-labels * jnp.log(logits) - (1 - labels) * jnp.log(1 - logits))
def compute_metrics(logits, labels):
loss = binary_cross_entropy(logits, labels)
accuracy = jnp.mean((logits > 0.5) == labels)
metrics = {
'loss': loss,
'accuracy': accuracy,
}
metrics = jax.lax.pmean(metrics, axis_name='batch')
return metrics
@functools.partial(jax.jit, static_argnums=(2, 3))
def sort_group(inputs: jnp.ndarray,
in_group: jnp.ndarray,
quantization: int,
epsilon: float):
"""Sorts and quantizes only the member of the given group.
Args:
inputs: 1D array to be sorted.
in_group: a 1D array of 0s and 1s indicating if the element is part of the
group or not.
quantization: the number of quantile values the sorted output is mapped onto.
epsilon: Sinkhorn entropic regularization strength.
Returns:
A sorted array of size `quantization`.
"""
a = in_group / jnp.sum(in_group)
b = jnp.ones(quantization) / quantization
ot = ott.tools.soft_sort.transport_for_sort(
inputs, a, b, dict(epsilon=epsilon))
return 1.0 / b * ot.apply(inputs, axis=0)
def fairness_regularizer(inputs: jnp.ndarray,
groups: jnp.ndarray,
quantization: int = 16,
epsilon: float = 1e-2,
num_groups: int = 2):
"""Approximation of the wasserstein between the per-group distributions."""
quantiles = jnp.stack([sort_group(inputs, groups == g, quantization, epsilon)
for g in range(num_groups)])
weights = jnp.stack(
[jnp.sum(groups == g) for g in range(num_groups)]) / groups.shape[0]
mean_quantile = jnp.sum(weights[:, None] * quantiles, axis=0)
delta = jnp.where(quantiles,
quantiles - mean_quantile,
jnp.zeros_like(mean_quantile))
return jnp.mean(delta ** 2)
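# A minimal usage sketch (not part of the original module; the random inputs
# and the 0.1 regularization weight below are placeholder assumptions): it
# combines the cross-entropy loss with the fairness regularizer on a batch of
# sigmoid outputs and binary group labels.
if __name__ == "__main__":
  key = jax.random.PRNGKey(0)
  probs = jax.nn.sigmoid(jax.random.normal(key, (128,)))
  labels = jax.random.bernoulli(key, 0.5, (128,)).astype(jnp.float32)
  groups = jax.random.bernoulli(key, 0.5, (128,)).astype(jnp.int32)
  total = (binary_cross_entropy(probs, labels) +
           0.1 * fairness_regularizer(probs, groups,
                                      quantization=16, epsilon=1e-2))
  print(total)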
|
the-stack_0_9527 | import tensorflow as tf
import numpy as np
import csv
import random
from keras.optimizers import Adam
from keras.layers import (
Flatten, Dense, Dropout, Convolution2D, Activation, BatchNormalization
)
from keras.models import Model, Sequential, model_from_json
from scipy.misc import imread, imresize
from sklearn.model_selection import train_test_split
import json
flags = tf.app.flags
FLAGS = flags.FLAGS
# command line flags
flags.DEFINE_string('save_file', 'output_model', "The model and weights file to save (.json and .h5)")
flags.DEFINE_string('driving_log', 'driving_log.csv', 'The driving log.')
flags.DEFINE_integer('epochs', 8, "The number of epochs.")
flags.DEFINE_integer('batch_size', 64, "The batch size.")
flags.DEFINE_integer('epoch_sample', 1000, 'The epoch sample.')
flags.DEFINE_float('lrate', 0.001, 'The learning rate')
flags.DEFINE_integer('validation_sample', 1000, 'The validation sample.')
flags.DEFINE_integer('testing_sample', 1000, 'The testing sample.')
CSV_CENTER_IMAGE_INDEX = 0
CSV_LEFT_IMAGE_INDEX = 1
CSV_RIGHT_IMAGE_INDEX = 2
CSV_STEERING_IMAGE_INDEX = 3
CSV_THROTTLE_IMAGE_INDEX = 4
CSV_BRAKE_IMAGE_INDEX = 5
CSV_SPEED_IMAGE_INDEX = 6
def nvidia_model(image):
model = Sequential()
model.add(BatchNormalization(axis=1, input_shape=image.shape))
model.add(Convolution2D(16, 3, 3, border_mode='valid', subsample=(2, 2), activation='elu'))
model.add(Convolution2D(24, 3, 3, border_mode='valid', subsample=(1, 2), activation='elu'))
model.add(Convolution2D(36, 3, 3, border_mode='valid', activation='elu'))
model.add(Convolution2D(48, 2, 2, border_mode='valid', activation='elu'))
model.add(Convolution2D(48, 2, 2, border_mode='valid', activation='elu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Dropout(.5))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(1))
model.summary()
adam = Adam(lr=0.0001)
model.compile(loss='mse',
optimizer=adam)
return model
def load_csv(path):
csv_rows = []
with open(path, 'r') as infile:
reader = csv.reader(infile)
for row in reader:
csv_rows.append(row)
preprocess(csv_rows)
csv_rows_main, csv_rows_test = train_test_split(csv_rows, test_size=0.1)
csv_rows_train, csv_rows_val = train_test_split(csv_rows_main, test_size=0.1)
return (csv_rows_train, csv_rows_val, csv_rows_test)
def normalize(imgs):
"""
Normalize images to the range [-0.5, 0.5].
"""
return (imgs / 255.0) - 0.5
def flip(image, steering):
return (np.fliplr(image), -steering)
def crop(imgs):
result = []
for img in imgs:
result_img = img[10: , :, :]
result.append(result_img)
return result
def resize(imgs, shape=(20, 64, 3)):
"""
Resize images to shape.
"""
height, width, channels = shape
imgs_resized = np.empty([len(imgs), height, width, channels])
for i, img in enumerate(imgs):
imgs_resized[i] = imresize(img, shape)
return imgs_resized
def preprocess_image(img):
img = crop(img)
img = resize(img)
img = normalize(img)
return img
def generator_from(csv_rows):
while True:
for i in range(0, len(csv_rows)):
current_images = []
current_angeles = []
for j in range(0, FLAGS.batch_size):
angle = float(csv_rows[i][3])
current_images.append(imread(csv_rows[i][0].strip()).astype(np.float32))
current_angeles.append(angle)
if csv_rows[i][1] != '':
current_images.append(imread(csv_rows[i][1].strip()).astype(np.float32))
current_angeles.append(angle + .25)
if csv_rows[i][2] != '':
current_images.append(imread(csv_rows[i][2].strip()).astype(np.float32))
current_angeles.append(angle - .25)
(new_image, new_angle) = flip(imread(csv_rows[i][0]).astype(np.float32), angle)
current_images.append(new_image)
current_angeles.append(new_angle)
current_images = preprocess_image(current_images)
yield (current_images, current_angeles)
def get_image(path):
with open(path, mode='r') as infile:
reader = csv.reader(infile)
for rows in reader:
image = imread(rows[0]).astype(np.float32)
image = image - np.mean(image)
# csv.close(infile)
return preprocess_image(np.array([image]))[0]
# return image
def save_model(model):
print("Saving model...")
model.save_weights(FLAGS.weights_file)
model_as_json = model.to_json()
with open(FLAGS.model_file, "w") as model_file:
model_file.write(model_as_json)
print("Model saved.")
def save(model, prefix):
"""save model for future inspection and continuous training
"""
model_file = prefix + ".json"
weight_file = prefix + ".h5"
json.dump(model.to_json(), open(model_file, "w"))
model.save_weights(weight_file)
print("Model saved.")
return model
def restore(prefix):
"""restore a saved model
"""
model_file = prefix + ".json"
weight_file = prefix + ".h5"
model = model_from_json(json.load(open(model_file)))
model.load_weights(weight_file)
print("Model loaded.")
return model
def shuffle(csv_rows):
print("Shuffled the data.")
random.shuffle(csv_rows)
return csv_rows
def preprocess(csv_rows):
csv_rows = shuffle(csv_rows)
return csv_rows
def main(_):
(csv_rows_train, csv_rows_val, csv_rows_test) = load_csv(FLAGS.driving_log)
image = get_image(FLAGS.driving_log)
model = nvidia_model(image)
model.fit_generator(
generator=generator_from(csv_rows_train),
samples_per_epoch=FLAGS.epoch_sample,
nb_epoch=FLAGS.epochs,
validation_data=generator_from(csv_rows_val),
nb_val_samples=FLAGS.validation_sample,
)
# Evaluate the model
model.evaluate_generator(
generator=generator_from(csv_rows_test),
val_samples=FLAGS.testing_sample,
)
save(model, FLAGS.save_file)
# parses flags and calls the `main` function above
if __name__ == '__main__':
tf.app.run()
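# Example invocation (a sketch; the script name and file paths below are
# placeholders, not values from the original project):
#
#   python behavioral_cloning.py --driving_log driving_log.csv \
#       --epochs 8 --batch_size 64 --save_file output_model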
|
the-stack_0_9528 | from discord.ext import commands
import discord
import youtube_dl
import asyncio
from discord import utils
import sqlite3
class Player(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.db = sqlite3.connect('playlist.db')
@commands.command(name="hello")
async def hello(self, ctx):
await ctx.send('Did you call me?')
@commands.command(name="play", aliases=["재생"])
async def play(self, ctx: commands.Context, *q: str):
url = ' '.join(q)
ytdl_options = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'outtmpl': f'./songs/{ctx.guild.id}/%(extractor)s-%(id)s.mp3'
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
loop = asyncio.get_event_loop()
if url.startswith('https://') or url.startswith('http://'):
utype = 'url'
else:
utype = 'query'
def fetch_video():
if utype == 'url':
return ytdl.extract_info(url)
elif utype == 'query':
return ytdl.extract_info(f"ytsearch:{url}")
data = await loop.run_in_executor(None, lambda: fetch_video())
if 'entries' in data:
data = data['entries'][0]
vc = self.get_voice_client(ctx)
source = ytdl.prepare_filename(data)
vc.play(source=discord.FFmpegPCMAudio(source=source))
embed = discord.Embed(title="Now Playing")
embed.add_field(name="Song info", value=f"Title: {data.get('title')}")
await ctx.send(embed=embed)
@commands.command(name="stop", aliases=["์ ์ง"])
async def stop(self, ctx: commands.context):
self.get_voice_client(ctx).stop()
@commands.command(name="플레이리스트", aliases=['playlist'])
async def playlist(self, ctx: commands.Context):
con = self.db
con.execute(f'CREATE TABLE IF NOT EXISTS pl_{ctx.guild.id}(query varchar(255))')
con.commit()
await ctx.send('Playlist')
def get_voice_client(self, ctx: commands.Context) -> discord.VoiceClient:
return utils.get(self.bot.voice_clients, guild=ctx.guild)
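# The file ends without an extension entry point; a minimal setup hook
# (an assumed addition, mirroring the discord.py 1.x pattern used by the
# SpookyRating cog elsewhere in this collection) would register the cog:
def setup(bot: commands.Bot) -> None:
    bot.add_cog(Player(bot))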
|
the-stack_0_9532 | #!/usr/bin/python
# ---------------------------------------------------------------------------
# File: indefqpex1.py
# Version 12.8.0
# ---------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2009, 2017. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# ---------------------------------------------------------------------------
"""
Entering and optimizing an indefinite quadratic programming problem.
To run from the command line, use
python indefqpex1.py
"""
from __future__ import print_function
import cplex
def solve_and_display(p):
p.solve()
# solution.get_status() returns an integer code
print("Solution status = ", p.solution.get_status(), ":", end=' ')
# the following line prints the corresponding string
print(p.solution.status[p.solution.get_status()])
print("Solution value = ", p.solution.get_objective_value())
numrows = p.linear_constraints.get_num()
for i in range(numrows):
print("Row ", i, ": ", end=' ')
print("Slack = %10f " % p.solution.get_linear_slacks(i), end=' ')
print("Pi = %10f" % p.solution.get_dual_values(i))
numcols = p.variables.get_num()
for j in range(numcols):
print("Column ", j, ": ", end=' ')
print("Value = %10f " % p.solution.get_values(j), end=' ')
print("Reduced Cost = %10f" % p.solution.get_reduced_costs(j))
def indefqpex1():
# This example solves the non-convex quadratic programming problem
#
# minimize (-3 * pow(x,2) - 3 * pow(y,2 )- 1 * x * y)/2
#
# subject to:
# x + y >= 0
# -x + y >= 0
# -1 <= x <= 1
# 0 <= y <= 1
#
# This model has local optima at (1, 1), (-1, 1), (-0.1666667, 1) and
# (0,0) with objective values -3.5, -2.5, -1.4583333333 and 0.0
# respectively.
#
# After the initial solve, constraints are added to the model to
# force CPLEX to converge to some of these local optima in turn
p = cplex.Cplex()
p.variables.add(lb=[-1.0, 0.0], ub=[1.0, 1.0])
p.linear_constraints.add(lin_expr=[[[0, 1], [1.0, 1.0]],
[[0, 1], [-1.0, 1.0]]],
rhs=[0.0, 0.0],
senses=['G', 'G'])
p.objective.set_quadratic([[[0, 1], [-3.0, -0.5]],
[[0, 1], [-0.5, -3.0]]])
# When a non-convex objective function is present, CPLEX will
# raise an exception unless the optimalitytarget parameter is set to
# accept first-order optimal solutions
p.parameters.optimalitytarget.set(2)
# CPLEX may converge to either local optimum
solve_and_display(p)
# Add a constraint that cuts off the solution at (-1, 1)
p.linear_constraints.add(lin_expr=[[[0], [1.0]]],
rhs=[0.0],
senses='G',
names=["new_constraint"])
solve_and_display(p)
# Reverse the sense of the newly added constraint to cut off the
# solution at (1, 1)
p.linear_constraints.set_senses("new_constraint", 'L')
solve_and_display(p)
if __name__ == "__main__":
indefqpex1()
|
the-stack_0_9533 | import logging
from os.path import join
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import PlainTextResponse, RedirectResponse
from starlette.staticfiles import StaticFiles
import opsi
from opsi.util.networking import get_server_url
from opsi.util.templating import TemplateFolder
from .api import Api
from .test import WebserverTest
LOGGER = logging.getLogger(__name__)
class WebServer:
def __init__(self, program, frontend: str, port: int = 80, prefix="/"):
self.program = program
self.app = Starlette(debug=True)
self.url = get_server_url(program.lifespan, port, prefix)
self.template = TemplateFolder(join(frontend, "templates"))
self.app.add_route("/", self.template("nodetree.html"))
self.app.add_route(
"/settings",
self.template(
"settings.html",
persist=self.program.lifespan.persist,
daemon=self.program.lifespan.using_systemd,
nt=self.program.lifespan.NT_AVAIL,
netconf=self.program.lifespan.netconf_writable,
version=opsi.__version__,
),
)
self.testclient = WebserverTest(self.app)
self.api = Api(self.app, self.program)
self.make_hooks()
self.app.mount(
"/", CacheControlMiddleware(StaticFiles(directory=join(frontend, "www")))
)
def make_hooks(self):
PREFIX = "/hooks"
HOOKS = {
x[0]: x[1] for x in self.program.manager.hooks.items() if x[1].visible
} # {package: app}
self.app.add_route(
PREFIX, self.template("hooks.html", prefix=PREFIX, packages=HOOKS.keys())
)
# This is required because "/hooks/package/{path}" and "/hooks/package/" trigger the mounted app,
# but "/hooks/package" doesn't
self.app.add_route(PREFIX + "/{path}", self.trailingslash_redirect)
for package, hook in HOOKS.items():
path = PREFIX + "/" + package
hook.url = self.url + path.lstrip("/")
self.app.mount(path, hook.app)
def trailingslash_redirect(self, request):
return RedirectResponse(request.url.path + "/")
# These test functions go through the entire http pipeline
def get_funcs(self) -> str:
return self.testclient.get("/api/funcs")
def get_nodes(self) -> str:
return self.testclient.get("/api/nodes")
def set_nodes(self, data: str) -> str:
return self.testclient.post("/api/nodes", data)
class CacheControlMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
response = await call_next(request)
response.headers["Cache-Control"] = "no-cache public max-age=0 must-validate"
return response
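# A minimal wiring sketch (assumed; `program` and the frontend path come from
# the surrounding opsi application and are placeholders here):
#
#   server = WebServer(program, frontend="frontend", port=80)
#   # server.app is a regular Starlette ASGI app, so it can be served by any
#   # ASGI server, e.g. uvicorn.run(server.app, port=80)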
|
the-stack_0_9534 | import numpy as np
import torch
from torch.autograd import Variable
from networks.losses import CombinedLoss
from torch.optim import lr_scheduler
import os
def per_class_dice(y_pred, y_true, num_class):
avg_dice = 0
y_pred = y_pred.data.cpu().numpy()
y_true = y_true.data.cpu().numpy()
for i in range(num_class):
GT = y_true == (i + 1)
Pred = y_pred == (i + 1)
inter = np.sum(np.matmul(GT, Pred)) + 0.0001
union = np.sum(GT) + np.sum(Pred) + 0.0001
t = 2 * inter / union
avg_dice = avg_dice + (t / num_class)
return avg_dice
def create_exp_directory(exp_dir_name):
if not os.path.exists('models/' + exp_dir_name):
os.makedirs('models/' + exp_dir_name)
class Solver(object):
# global optimiser parameters
default_optim_args = {"lr": 1e-2,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 0.0001}
gamma = 0.5
step_size = 5
NumClass = 9
def __init__(self, optim=torch.optim.Adam, optim_args={},
loss_func=CombinedLoss()):
optim_args_merged = self.default_optim_args.copy()
optim_args_merged.update(optim_args)
self.optim_args = optim_args_merged
self.optim = optim
self.loss_func = loss_func
self._reset_histories()
def _reset_histories(self):
"""
Resets train and val histories for the accuracy and the loss.
"""
self.train_loss_history = []
self.train_acc_history = []
self.val_acc_history = []
self.val_loss_history = []
def train(self, model, train_loader, val_loader, num_epochs=10, log_nth=5, exp_dir_name='exp_default'):
"""
Train a given model with the provided data.
Inputs:
- model: model object initialized from a torch.nn.Module
- train_loader: train data in torch.utils.data.DataLoader
- val_loader: val data in torch.utils.data.DataLoader
- num_epochs: total number of training epochs
- log_nth: log training accuracy and loss every nth iteration
"""
optim = self.optim(model.parameters(), **self.optim_args)
scheduler = lr_scheduler.StepLR(optim, step_size=self.step_size,
gamma=self.gamma) # decay LR by a factor of 0.5 every 5 epochs
self._reset_histories()
iter_per_epoch = 1
# iter_per_epoch = len(train_loader)
if torch.cuda.is_available():
model.cuda()
print('START TRAIN.')
curr_iter = 0
create_exp_directory(exp_dir_name)
for epoch in range(num_epochs):
scheduler.step()
for i_batch, sample_batched in enumerate(train_loader):
X = Variable(sample_batched[0])
y = Variable(sample_batched[1])
w = Variable(sample_batched[2])
if model.is_cuda:
X, y, w = X.cuda(), y.cuda(), w.cuda()
for iter in range(iter_per_epoch):
curr_iter += iter
optim.zero_grad()
output = model(X)
loss = self.loss_func(output, y, w)
loss.backward()
optim.step()
if iter % log_nth == 0:
self.train_loss_history.append(loss.data[0])
print('[Iteration : ' + str(iter) + '/' + str(iter_per_epoch * num_epochs) + '] : ' + str(
loss.data[0]))
#_, batch_output = torch.max(F.softmax(model(X),dim=1), dim=1)
#avg_dice = per_class_dice(batch_output, y, self.NumClass)
#print('Per class average dice score is ' + str(avg_dice))
# self.train_acc_history.append(train_accuracy)
#
# val_output = torch.max(model(Variable(torch.from_numpy(val_loader.dataset.X))), dim= 1)
# val_accuracy = self.accuracy(val_output[1], Variable(torch.from_numpy(val_loader.dataset.y)))
# self.val_acc_history.append(val_accuracy)
print('[Epoch : ' + str(epoch) + '/' + str(num_epochs) + '] : ' + str(loss.data[0]))
model.save('models/' + exp_dir_name + '/relaynet_epoch' + str(epoch + 1) + '.model')
print('FINISH.')
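# A minimal usage sketch (assumed, not from the original repository; the model
# and the two DataLoaders must be constructed elsewhere):
#
#   solver = Solver(optim_args={"lr": 1e-2})
#   solver.train(model, train_loader, val_loader,
#                num_epochs=10, log_nth=5, exp_dir_name='exp_default')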
|
the-stack_0_9535 | import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import cv2
import numpy as np
from PIL import Image
from math import ceil
class VOCDataset(Dataset):
def __init__(self, root_path="data/VOCdevkit", dataset="voc2012", image_size=321, is_training=True):
self.dataset = dataset
if self.dataset == "voc2007":
self.data_path = os.path.join(root_path, "VOC2007")
if is_training:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/trainval.txt")
else:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/test.txt")
elif self.dataset == "voc2012":
self.data_path = os.path.join(root_path, "VOC2012")
if is_training:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/train.txt")
else:
id_list_path = os.path.join(self.data_path, "ImageSets/Segmentation/val.txt")
elif self.dataset == "augmentedvoc":
self.data_path = os.path.join(root_path, "VOCaugmented")
if is_training:
id_list_path = os.path.join(self.data_path, "list/train_aug.txt")
else:
id_list_path = os.path.join(self.data_path, "list/val.txt")
self.ids = [id.strip() for id in open(id_list_path)]
self.classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
self.image_size = image_size
self.num_classes = len(self.classes)
self.num_images = len(self.ids)
self.is_training = is_training
def __len__(self):
return self.num_images
def __getitem__(self, item):
id = self.ids[item]
if self.dataset in ["voc2007", "voc2012"]:
image_path = os.path.join(self.data_path, "JPEGImages", "{}.jpg".format(id))
gt_image_path = os.path.join(self.data_path, "SegmentationClass", "{}.png".format(id))
elif self.dataset == "augmentedvoc":
image_path = os.path.join(self.data_path, "img", "{}.jpg".format(id))
gt_image_path = os.path.join(self.data_path, "gt", "{}.png".format(id))
image = cv2.imread(image_path).astype(np.float32)
image[:, :, 0] -= 104.008
image[:, :, 1] -= 116.669
image[:, :, 2] -= 122.675
gt_image = Image.open(gt_image_path).convert('P')
gt_image = np.asarray(gt_image, np.int32)
gt_image[gt_image == 255] = 0
image = cv2.resize(image, (self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
gt_image = cv2.resize(gt_image, (self.image_size, self.image_size), interpolation=cv2.INTER_NEAREST)
gt_torch = torch.Tensor(torch.from_numpy(gt_image[None, None, :, :]).float())
gt1_size = ceil(self.image_size / 8.)
interp = nn.Upsample(size=(gt1_size, gt1_size), mode='bilinear', align_corners=True)
gt1 = interp(gt_torch).data.numpy()[0, 0, :, :]
gt2_size = ceil(self.image_size / 16.)
interp = nn.Upsample(size=(gt2_size, gt2_size), mode='bilinear', align_corners=True)
gt2 = interp(gt_torch).data.numpy()[0, 0, :, :]
return np.transpose(np.array(image, dtype=np.float32), (2, 0, 1)), np.array(gt1, dtype=np.float32), np.array(
gt2, dtype=np.float32)
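# A minimal usage sketch (assumed; the root path is the constructor default
# and the DataLoader settings are placeholders):
#
#   from torch.utils.data import DataLoader
#   dataset = VOCDataset(root_path="data/VOCdevkit", dataset="voc2012",
#                        image_size=321, is_training=True)
#   image, gt1, gt2 = dataset[0]
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)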
|
the-stack_0_9536 | import bisect
import json
import logging
import random
from pathlib import Path
import discord
from discord.ext import commands
from bot.constants import Colours
log = logging.getLogger(__name__)
with Path("bot/resources/halloween/spooky_rating.json").open() as file:
SPOOKY_DATA = json.load(file)
SPOOKY_DATA = sorted((int(key), value) for key, value in SPOOKY_DATA.items())
class SpookyRating(commands.Cog):
"""A cog for calculating one's spooky rating."""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.local_random = random.Random()
@commands.command()
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def spookyrating(self, ctx: commands.Context, who: discord.Member = None) -> None:
"""
Calculates the spooky rating of someone.
Any user will always yield the same result, no matter who calls the command
"""
if who is None:
who = ctx.author
# This ensures the same result over multiple runtimes
self.local_random.seed(who.id)
spooky_percent = self.local_random.randint(1, 101)
# We need the -1 due to how bisect returns the point
# see the documentation for further detail
# https://docs.python.org/3/library/bisect.html#bisect.bisect
index = bisect.bisect(SPOOKY_DATA, (spooky_percent,)) - 1
_, data = SPOOKY_DATA[index]
embed = discord.Embed(
title=data['title'],
description=f'{who} scored {spooky_percent}%!',
color=Colours.orange
)
embed.add_field(
name='A whisper from Satan',
value=data['text']
)
embed.set_thumbnail(
url=data['image']
)
await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
"""Spooky Rating Cog load."""
bot.add_cog(SpookyRating(bot))
|
the-stack_0_9539 | from ctypes import byref, windll
from ctypes.wintypes import DWORD, HANDLE
from typing import Any, Optional, TextIO
from prompt_toolkit.data_structures import Size
from prompt_toolkit.renderer import Output
from prompt_toolkit.utils import is_windows
from prompt_toolkit.win32_types import STD_OUTPUT_HANDLE
from .color_depth import ColorDepth
from .vt100 import Vt100_Output
from .win32 import Win32Output
__all__ = [
"Windows10_Output",
]
# See: https://msdn.microsoft.com/pl-pl/library/windows/desktop/ms686033(v=vs.85).aspx
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
class Windows10_Output:
"""
Windows 10 output abstraction. This enables and uses vt100 escape sequences.
"""
def __init__(
self, stdout: TextIO, default_color_depth: Optional[ColorDepth] = None
) -> None:
self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth)
self.vt100_output = Vt100_Output(
stdout, lambda: Size(0, 0), default_color_depth=default_color_depth
)
self._hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
def flush(self) -> None:
"""
Write to output stream and flush.
"""
original_mode = DWORD(0)
# Remember the previous console mode.
windll.kernel32.GetConsoleMode(self._hconsole, byref(original_mode))
# Enable processing of vt100 sequences.
windll.kernel32.SetConsoleMode(
self._hconsole,
DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING),
)
try:
self.vt100_output.flush()
finally:
# Restore console mode.
windll.kernel32.SetConsoleMode(self._hconsole, original_mode)
@property
def responds_to_cpr(self) -> bool:
return False # We don't need this on Windows.
def __getattr__(self, name: str) -> Any:
if name in (
"get_size",
"get_rows_below_cursor_position",
"enable_mouse_support",
"disable_mouse_support",
"scroll_buffer_to_prompt",
"get_win32_screen_buffer_info",
"enable_bracketed_paste",
"disable_bracketed_paste",
"get_default_color_depth",
):
return getattr(self.win32_output, name)
else:
return getattr(self.vt100_output, name)
Output.register(Windows10_Output)
def is_win_vt100_enabled() -> bool:
"""
Returns True when we're running Windows and VT100 escape sequences are
supported.
"""
if not is_windows():
return False
hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
# Get original console mode.
original_mode = DWORD(0)
windll.kernel32.GetConsoleMode(hconsole, byref(original_mode))
try:
# Try to enable VT100 sequences.
result = windll.kernel32.SetConsoleMode(
hconsole, DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
)
return result == 1
finally:
windll.kernel32.SetConsoleMode(hconsole, original_mode)
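# A small selection sketch (assumed, not part of the library): prefer the
# vt100-capable output class when the console supports it.
#
#   import sys
#   if is_win_vt100_enabled():
#       output = Windows10_Output(sys.stdout)
#   else:
#       output = Win32Output(sys.stdout)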
|
the-stack_0_9541 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("reshape", v1=["reshape", "manip.reshape"])
def reshape(tensor, shape, name=None): # pylint: disable=redefined-outer-name
r"""Reshapes a tensor.
Given `tensor`, this operation returns a tensor that has the same values
as `tensor` with shape `shape`.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total size remains constant. In particular,
a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can
be -1.
If `shape` is 1-D or higher, then the operation returns a tensor with shape
`shape` filled with the values of `tensor`. In this case, the number of
elements implied by `shape` must be the same as the number of elements in
`tensor`.
For example:
```
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
# tensor 't' is [[[1, 1], [2, 2]],
# [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
[3, 3, 4, 4]]
# tensor 't' is [[[1, 1, 1],
# [2, 2, 2]],
# [[3, 3, 3],
# [4, 4, 4]],
# [[5, 5, 5],
# [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
# -1 can also be used to infer the shape
# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]]
# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
result = gen_array_ops.reshape(tensor, shape, name)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("fill")
def fill(dims, value, name=None):
r"""Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
[9, 9, 9]]
```
`tf.fill` differs from `tf.constant` in a few ways:
* `tf.fill` only supports scalar contents, whereas `tf.constant` supports
Tensor values.
* `tf.fill` creates an Op in the computation graph that constructs the
actual
Tensor value at runtime. This is in contrast to `tf.constant` which embeds
the entire Tensor into the graph with a `Const` node.
* Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
based on other runtime Tensors, unlike `tf.constant`.
Args:
dims: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
Represents the shape of the output tensor.
value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy) Equivalent to np.full @end_compatibility
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
result = gen_array_ops.fill(dims, value, name=name)
tensor_util.maybe_set_static_shape(result, dims)
return result
@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
For example:
```python
import tensorflow as tf
val0 = tf.ones((1,), dtype=tf.float32)
a = tf.atan2(val0, val0)
a_identity = tf.identity(a)
print(a.numpy()) #[0.7853982]
print(a_identity.numpy()) #[0.7853982]
```
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.executing_eagerly() and not hasattr(input, "graph"):
input = ops.convert_to_tensor(input)
in_device = input.backing_device
# TODO(ashankar): Does 'identity' need to invoke execution callbacks?
context_device = context.context().device_name
if not context_device:
context_device = "/job:localhost/replica:0/task:0/device:CPU:0"
if context_device == in_device:
return input
else:
copied = input._copy() # pylint: disable=protected-access
if hasattr(copied, "_handle_data"):
copied._handle_data = input._handle_data # pylint: disable=protected-access
return copied
else:
ret = gen_array_ops.identity(input, name=name)
# Propagate handle data for happier shape inference for resource variables.
if hasattr(input, "_handle_data"):
ret._handle_data = input._handle_data # pylint: disable=protected-access
return ret
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if either both or neither of `dim` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("Must specify an axis argument to tf.expand_dims()")
return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
"""
return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops.list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given symbolic shapes.
When shape_x and shape_y are Tensors representing shapes (i.e. the result of
calling tf.shape on another Tensor) this computes a Tensor which is the shape
of the result of a broadcasting op applied in tensors of shapes shape_x and
shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
Tensor whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors do not have statically known shapes.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y)
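# Example (a sketch restating the shapes from the docstring above):
#
#   shape_x = constant([1, 2, 3])
#   shape_y = constant([5, 1, 3])
#   broadcast_dynamic_shape(shape_x, shape_y)  # => [5, 2, 3]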
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given known shapes.
When shape_x and shape_y are fully known TensorShapes this computes a
TensorShape which is the shape of the result of a broadcasting op applied in
tensors of shapes shape_x and shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
TensorShape whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors have statically known shapes.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
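# Example (a sketch): with fully defined TensorShapes the result is itself a
# TensorShape.
#
#   broadcast_static_shape(tensor_shape.TensorShape([1, 2, 3]),
#                          tensor_shape.TensorShape([5, 1, 3]))
#   # => TensorShape([5, 2, 3])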
@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
return shape(input, name, out_type)
@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: The specified output type of the operation (`int32` or `int64`).
Defaults to `tf.int32`(optional).
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
return gen_array_ops.shape_n(input, out_type=out_type, name=name)
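# Example (a sketch, not part of the original source):
#
#   x = constant([[1, 2, 3]])        # shape [1, 3]
#   y = constant([[4], [5], [6]])    # shape [3, 1]
#   shape_n([x, y])                  # => [[1, 3], [3, 1]]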
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
return size(input, name, out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if (context.executing_eagerly()
and not hasattr(input, "graph")
and not isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))
):
input = ops.convert_to_tensor(input)
np_out_type = out_type.as_numpy_dtype
num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access
return ops.convert_to_tensor(num_elements, dtype=out_type)
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
_SLICE_TYPE_ERROR = (
"Only integers, slices (`:`), ellipsis (`...`), "
"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
"indices")
_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
dtypes.int64_ref)
def _check_index(idx):
"""Check if a given value is a valid index into a tensor."""
if isinstance(idx, (six.integer_types, tensor_shape.Dimension)):
return
# Optimistic check. Assumptions:
# * any object with a dtype is supported
# * any object with a dtype has a sizeable shape attribute.
dtype = getattr(idx, "dtype", None)
if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
idx.shape and len(idx.shape) == 1):
# TODO(slebedev): IndexError seems more appropriate here, but it
# will break `_slice_helper` contract.
raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
currently only support basic indexing. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# Strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# Skip every other row and reverse the order of the columns
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
# Masks
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable object to slice
(i.e. tensor is the read-only view of this variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, ellipsis,
tf.newaxis or scalar int32/int64 tensors.
"""
if isinstance(slice_spec, bool) or \
(isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
(isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
return boolean_mask(tensor=tensor, mask=slice_spec)
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _BaseSlice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and (isinstance(s.start, ops.Tensor) or
s.start != sys.maxsize):
_check_index(s.start)
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and (isinstance(s.stop, ops.Tensor) or
s.stop != sys.maxsize):
_check_index(s.stop)
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
_check_index(s.step)
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
_check_index(s)
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
# stack possibly involves no tensors, so we must use op_scope correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input_` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input_` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input_`. In other
words, `begin[i]` is the offset into the i'th dimension of `input_` that you
want to slice from.
Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input_.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input_`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1, taking on the value at index
`begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
equal to 2.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
if not (var is None and isinstance(op, ops.EagerTensor)):
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
else:
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
op.assign = assign
return op
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
As an operator. The operator also has a `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: TypeError: If the slice indices aren't int, slice,
ellipsis, tf.newaxis or int32/int64 tensors.
"""
return _slice_helper(var.value(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
available, in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops.parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.stack([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access
if value_shape is not None:
expanded_num_dims = len(value_shape) + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
if context.executing_eagerly():
# NOTE: Fast path when all the items are tensors, this doesn't do any type
# checking.
if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
return gen_array_ops.pack(list_or_tuple, name=name)
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" %
(elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops.pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be converted
to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _cast_nested_seqs_to_dtype(dtype):
def _maybe_cast(elem):
if ops.is_dense_tensor_like(elem):
if dtype != elem.dtype.base_dtype:
elem = gen_math_ops.cast(elem, dtype)
return elem
return _maybe_cast
_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)
def _should_not_autopack(v):
# The condition we really want is
# ops.is_dense_tensor_like(...)
# but it is >5x slower due to abc.ABCMeta.__instancecheck__.
# pylint: disable=unidiomatic-typecheck
# TODO(slebedev): add nest.all?
return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))
# pylint: enable=unidiomatic-typecheck
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref or _should_not_autopack(v):
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is None:
dtype = inferred_dtype
elif dtype != inferred_dtype:
v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
return _autopacking_helper(v, dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
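# Illustrative sketch of the autopacking registered above (values chosen for
# illustration): a Python list that contains at least one tensor is packed
# into a single tensor when it is converted, e.g.
#
#   tf.add([tf.constant(1.0), 2.0, 3.0], 1.0)
#   # The list is first packed into the float32 tensor [1., 2., 3.],
#   # so the result is [2., 3., 4.].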
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack.
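For example (an illustrative sketch; the tensor values are arbitrary):
```python
x = tf.constant([[1, 4], [2, 5], [3, 6]])  # shape (3, 2)
a, b, c = tf.unstack(x)       # 3 tensors of shape (2,): [1, 4], [2, 5], [3, 6]
d, e = tf.unstack(x, axis=1)  # 2 tensors of shape (3,): [1, 2, 3], [4, 5, 6]
```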
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred if
`None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape.dims[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
```python
t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
tf.concat([t1, t2], -1)
```
would produce:
```python
[[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing for
axis is 0-based. Positive axis in the range of `[0, rank(values))` refers
to `axis`-th dimension. And negative axis refers to `axis +
rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_has_rank(0)
return identity(values[0], name=name)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(
tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
[first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
return boolean_mask(tensor, mask, name, axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
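# Illustrative sketch of the two ops above (values chosen for illustration):
#
#   x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
#   y, idx = tf.unique(x)                     # y   == [1, 2, 4, 7, 8]
#                                             # idx == [0, 0, 1, 2, 2, 2, 3, 4, 4]
#   y, idx, count = tf.unique_with_counts(x)  # count == [2, 1, 3, 1, 2]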
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer, then `value` is split along dimension
`axis` into `num_or_size_splits` smaller tensors. This requires that it evenly
divides `value.shape[axis]`.
If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
element has the same size as the `value` except along dimension `axis` where
the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either an integer indicating the number of splits along
split_dim or a 1-D integer `Tensor` or Python list containing the sizes of
each output tensor along split_dim. If a scalar then it must evenly divide
`value.shape[axis]`; otherwise the sum of sizes along the split dimension
must match that of the `value`.
axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if isinstance(num_or_size_splits,
six.integer_types + (tensor_shape.Dimension,)):
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if size_splits._rank() == 0:
raise ValueError(
"Rank-0 tensors are not supported as the num_or_size_splits argument "
"to split. Argument provided: %s" % (num_or_size_splits,))
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
@tf_export("transpose", v1=[])
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
@tf_export(v1=["transpose"])
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
transpose_fn = (
gen_array_ops.conjugate_transpose if
(conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
if perm is None:
a = ops.convert_to_tensor(a, name="a")
if not a.get_shape().ndims:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
else:
rank = a.get_shape().ndims
perm = (rank - 1) - np.arange(rank)
ret = transpose_fn(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if not context.executing_eagerly():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = transpose_fn(a, perm, name=name)
return ret
# pylint: disable=invalid-name
@tf_export(
"linalg.matrix_transpose",
v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.linalg.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.linalg.matrix_transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, `linalg.matrix_transpose` returns a new
tensor with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.linalg.matrix_transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
(gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
name="diag",
k=0,
num_rows=-1,
num_cols=-1,
padding_value=0):
"""Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
diagonals of a matrix, with everything else padded with `padding_value`. `num_rows`
and `num_cols` specify the dimension of the innermost matrix of the output. If
both are not specified, the op assumes the innermost matrix is square and
infers its size from `k` and the innermost dimension of `diagonal`. If only
one of them is specified, the op assumes the unspecified value is the smallest
possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor
has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only
one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has
rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is
scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and
the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
    padding_value                             ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the
same batch (`M = k[1]-k[0]+1`), and the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, k[1]-d, n-max(d, 0)] ; if d_lower <= d <= d_upper
    padding_value                               ; otherwise
```
where `d = n - m`
For example:
```
# The main diagonal.
diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]],
[[5, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 7, 0],
[0, 0, 0, 8]]]
# A superdiagonal (per batch).
diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
[0, 0, 2, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]],
[[0, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 6],
[0, 0, 0, 0]]]
# A band of diagonals.
diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)
[4, 5, 0]],
[[6, 7, 9],
[9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 0))
==> [[[1, 0, 0], # Output shape: (2, 3, 3)
[4, 2, 0],
[0, 5, 3]],
[[6, 0, 0],
[9, 7, 0],
[0, 1, 9]]]
# Rectangular matrix.
diagonal = np.array([1, 2]) # Input shape: (2)
tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4)
[1, 0, 0, 0],
[0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9.
tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2)
[1, 9],
[9, 2]]
```
Args:
diagonal: A `Tensor` with `rank k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
Returns:
A Tensor. Has the same type as `diagonal`.
"""
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/kernel_tests/diag_op_test.py)
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_v2(
diagonal=diagonal,
k=k,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value,
name=name)
# Call v1 to maintain forward compatibility.
return gen_array_ops.matrix_diag(diagonal=diagonal, name=name)
@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@deprecation.deprecated_endpoints("matrix_diag_part")
@dispatch.add_dispatch_support
def matrix_diag_part(
input, # pylint:disable=redefined-builtin
name="diag_part",
k=0,
padding_value=0):
"""Returns the batched diagonal part of a batched tensor.
Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
`input`.
Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
Let `max_diag_len` be the maximum length among all diagonals to be extracted,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
Let `num_diags` be the number of diagonals to extract,
`num_diags = k[1] - k[0] + 1`.
If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
`[I, J, ..., L, max_diag_len]` and values:
```
diagonal[i, j, ..., l, n]
  = input[i, j, ..., l, n+y, n+x] ; when 0 <= n+y < M and 0 <= n+x < N,
    padding_value                 ; otherwise.
```
where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
Otherwise, the output tensor has rank `r` with dimensions
`[I, J, ..., L, num_diags, max_diag_len]` with values:
```
diagonal[i, j, ..., l, m, n]
  = input[i, j, ..., l, n+y, n+x] ; when 0 <= n+y < M and 0 <= n+x < N,
    padding_value                 ; otherwise.
```
where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
The input must be at least a matrix.
For example:
```
input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8],
[9, 8, 7, 6]],
[[5, 4, 3, 2],
[1, 2, 3, 4],
[5, 6, 7, 8]]])
# A main diagonal from each batch.
tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
[5, 2, 7]]
# A superdiagonal from each batch.
tf.matrix_diag_part(input, k = 1)
==> [[2, 7, 6], # Output shape: (2, 3)
[4, 3, 8]]
# A tridiagonal band from each batch.
tf.matrix_diag_part(input, k = (-1, 1))
==> [[[2, 7, 6], # Output shape: (2, 3, 3)
[1, 6, 7],
[5, 8, 0]],
[[4, 3, 8],
[5, 2, 7],
[1, 6, 0]]]
# Padding value = 9
tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
==> [[[4, 9, 9], # Output shape: (2, 3, 3)
[3, 8, 9],
[2, 7, 6]],
[[2, 9, 9],
[3, 4, 9],
[4, 3, 8]]]
```
Args:
input: A `Tensor` with `rank k >= 2`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
Returns:
A Tensor containing diagonals of `input`. Has the same type as `input`.
"""
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/kernel_tests/diag_op_test.py)
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(input, "dtype") and input.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_part_v2(
input=input, k=k, padding_value=padding_value, name=name)
# Call v1 to maintain forward compatibility.
return gen_array_ops.matrix_diag_part(input=input, name=name)
@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
input, # pylint:disable=redefined-builtin
diagonal,
name="set_diag",
k=0):
"""Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the specified diagonals of the
innermost matrices. These will be overwritten by the values in `diagonal`.
`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`.
If `k` is scalar or `k[0] == k[1]`:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
    input[i, j, ..., l, m, n]              ; otherwise
```
Otherwise,
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, k[1]-d, n-max(d, 0)] ; if d_lower <= d <= d_upper
input[i, j, ..., l, m, n] ; otherwise
```
where `d = n - m`
For example:
```
# The main diagonal.
input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]])
diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
[4, 5, 6]])
tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
[7, 2, 7, 7],
[7, 7, 3, 7]],
[[4, 7, 7, 7],
[7, 5, 7, 7],
[7, 7, 6, 7]]]
# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
[7, 7, 2, 7],
[7, 7, 7, 3]],
[[7, 4, 7, 7],
[7, 7, 5, 7],
[7, 7, 7, 6]]]
# A band of diagonals.
diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3)
[4, 5, 0]],
[[6, 1, 2],
[3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 0))
==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
[4, 2, 7, 7],
[0, 5, 3, 7]],
[[6, 7, 7, 7],
[3, 1, 7, 7],
[7, 4, 2, 7]]]
```
Args:
input: A `Tensor` with rank `k + 1`, where `k >= 1`.
diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,
otherwise. `k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
"""
# LINT.IfChange
if compat.forward_compatible(2019, 8, 31):
# LINT.ThenChange(//tensorflow/python/kernel_tests/diag_op_test.py)
return gen_array_ops.matrix_set_diag_v2(
input=input, diagonal=diagonal, k=k, name=name)
# Call v1 to maintain forward compatibility.
return gen_array_ops.matrix_set_diag(
input=input, diagonal=diagonal, name=name)
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
try:
if np.prod(shape) < 1000:
return constant(value, shape=shape, dtype=dtype, name=name)
except TypeError:
# Happens when shape is a Tensor, list with Tensor elements, etc.
pass
return None
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(zero, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["zeros_like"])
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(tensor, dtype, name, optimize)
@tf_export("zeros_like", v1=[])
@dispatch.add_dispatch_support
def zeros_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]] with dtype=int32
# If the dtype of the input `tensor` is `float32`, the output is also `float32`.
tensor = tf.constant([[1.0, 2.0, 3.0], [4, 5, 6]])
tf.zeros_like(tensor)  # [[0., 0., 0.], [0., 0., 0.]] with dtype=float32
# To get an output of a different dtype, pass `dtype` explicitly.
tensor = tf.constant([[1.0, 2.0, 3.0], [4, 5, 6]])
tf.zeros_like(tensor, dtype=tf.int32)  # [[0, 0, 0], [0, 0, 0]] with dtype=int32
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(input, dtype, name, optimize=True)
def zeros_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 zeros_like API calls."""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.executing_eagerly():
if dtype is not None and dtype != tensor.dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
with ops.device(tensor.device):
return gen_array_ops.zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined() and
tensor.dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops.zeros_like(tensor, name=name)
@tf_export(v1=["ones_like"])
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,
`complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(tensor, dtype, name, optimize)
@tf_export("ones_like", v1=[])
@dispatch.add_dispatch_support
def ones_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to 1. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 ones_like API calls."""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if not context.executing_eagerly():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(one, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin
"""A placeholder op that passes through `input` when its output is not fed.
Args:
input: A `Tensor`. The default value to produce when output is not fed.
shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of
the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
return gen_array_ops.placeholder_with_default(input, shape, name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.sparse.placeholder(tf.float32)
y = tf.sparse.reduce_sum(x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.compat.v1.SparseTensorValue(indices, values, shape)})) # Will
succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64,
shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
return pad(tensor, paddings, mode, name, constant_values)
@tf_export(v1=["pad"])
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) has passed,
    # remove the "Pad" fallback here.
if not tensor_util.is_tensor(constant_values) and constant_values == 0:
result = gen_array_ops.pad(tensor, paddings, name=name)
else:
result = gen_array_ops.pad_v2(
tensor, paddings, constant_values, name=name)
elif mode == "REFLECT":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if not context.executing_eagerly():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and
not result.shape.is_fully_defined() and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any((x is None for x in padding)):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
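# Illustrative sketch of the static-shape restoration above (assumes the public
# `tf.pad` API and constant paddings; outputs are not asserted here):
#
#   t = tf.constant([[1, 2], [3, 4], [5, 6]])   # static shape [3, 2]
#   p = tf.pad(t, [[1, 1], [2, 2]])
#   p.shape                                     # TensorShape([5, 6]): each dim
#                                               # grows by sum(paddings[D])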
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
TypeError: When no keyword arguments (kwargs) are passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
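# Illustrative sketch of the 'ij' (matrix) indexing convention, assuming the
# public `tf.meshgrid` API with small constant inputs:
#
#   X, Y = tf.meshgrid([1, 2, 3], [4, 5, 6], indexing='ij')
#   # X = [[1, 1, 1],      Y = [[4, 5, 6],
#   #      [2, 2, 2],           [4, 5, 6],
#   #      [3, 3, 3]]           [4, 5, 6]]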
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # Sentinel: the dimension size or stride is not statically known.
  use_full_range = None  # Sentinel: a slice bound was omitted, so use the full range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
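# Illustrative sketch of the computation above (assuming a statically known
# dimension size of 10 and shrink=False):
#
#   spec = slice(1, 7, 2)        -> selected indices 1, 3, 5  -> size 3
#   spec = slice(None, None, -1) -> begin/end default to the full reversed
#                                   range                     -> size 10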
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      length of `truth`.
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
  Raises:
    ValueError: if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape().dims[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
name="crops")
return result_paddings, result_crops
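# Illustrative sketch (assumes the public `tf.required_space_to_batch_paddings`
# API with small constant inputs; values follow the formulas documented above):
#
#   paddings, crops = tf.required_space_to_batch_paddings(
#       input_shape=[3, 5], block_shape=[2, 3])
#   # paddings == [[0, 1], [0, 1]]  -- pads each dimension up to a multiple of
#   #                                  the corresponding block size
#   # crops    == [[0, 1], [0, 1]]  -- crops that extra padding back off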
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch( # pylint: disable=missing-docstring
input, paddings, block_size=None, name=None, block_shape=None): # pylint: disable=redefined-builtin
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
def space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin
return space_to_batch_nd(input, block_shape, paddings, name)
space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
def batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
def batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin
"""BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
shape `block_shape + [batch]`, interleaves these blocks back into the grid
defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
same rank as the input. The spatial dimensions of this intermediate result
are then optionally cropped according to `crops` to produce the output. This
is the reverse of SpaceToBatch. See below for a precise description.
Args:
input: A `Tensor`. N-D with shape `input_shape = [batch] + spatial_shape +
remaining_shape`, where spatial_shape has M dimensions.
block_shape: A `Tensor`. Must be one of the following types: `int32`,
`int64`. 1-D with shape `[M]`, all values must be >= 1. For backwards
compatibility with TF 1.0, this parameter may be an int, in which case it
is converted to `numpy.array([block_shape, block_shape],
dtype=numpy.int64)`.
crops: A `Tensor`. Must be one of the following types: `int32`, `int64`. 2-D
with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start,
crop_end]` specifies the amount to crop from input dimension `i + 1`,
which corresponds to spatial dimension `i`. It is required that
`crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
    This operation is equivalent to the following steps:

    1. Reshape `input` to `reshaped` of shape:
         [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape),
          input_shape[1], ..., input_shape[N-1]]

    2. Permute dimensions of `reshaped` to produce `permuted` of shape:
         [batch / prod(block_shape), input_shape[1], block_shape[0], ...,
          input_shape[M], block_shape[M-1], input_shape[M+1], ...,
          input_shape[N-1]]

    3. Reshape `permuted` to produce `reshaped_permuted` of shape:
         [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,
          input_shape[M] * block_shape[M-1], input_shape[M+1], ...,
          input_shape[N-1]]

    4. Crop the start and end of dimensions `[1, ..., M]` of `reshaped_permuted`
       according to `crops` to produce the output of shape:
         [batch / prod(block_shape),
          input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ...,
          input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
          input_shape[M+1], ..., input_shape[N-1]]
    Some examples:

    (1) For the following input of shape `[4, 1, 1, 1]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
        ```
        [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
        ```
        The output tensor has shape `[1, 2, 2, 1]` and value:
        ```
        x = [[[[1], [2]], [[3], [4]]]]
        ```

    (2) For the following input of shape `[4, 1, 1, 3]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
        ```
        [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
        ```
        The output tensor has shape `[1, 2, 2, 3]` and value:
        ```
        x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
        ```

    (3) For the following input of shape `[4, 2, 2, 1]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
        ```
        x = [[[[1], [3]], [[9], [11]]],
             [[[2], [4]], [[10], [12]]],
             [[[5], [7]], [[13], [15]]],
             [[[6], [8]], [[14], [16]]]]
        ```
        The output tensor has shape `[1, 4, 4, 1]` and value:
        ```
        x = [[[[1], [2], [3], [4]],
              [[5], [6], [7], [8]],
              [[9], [10], [11], [12]],
              [[13], [14], [15], [16]]]]
        ```

    (4) For the following input of shape `[8, 1, 3, 1]`,
        `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
        ```
        x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
             [[[0], [2], [4]]], [[[0], [10], [12]]],
             [[[0], [5], [7]]], [[[0], [13], [15]]],
             [[[0], [6], [8]]], [[[0], [14], [16]]]]
        ```
        The output tensor has shape `[2, 2, 4, 1]` and value:
        ```
        x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
             [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
        ```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(block_shape, int):
block_shape = np.array([block_shape, block_shape], dtype=np.int64)
return batch_to_space_nd(
input=input, block_shape=block_shape, crops=crops, name=name)
@tf_export("one_hot")
@dispatch.add_dispatch_support
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`
If `off_value` is not provided, it will default to the value `0` with type
`dtype`
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer
to a non-ragged axis. The output will be equivalent to applying 'one_hot' on
the values of the RaggedTensor, and creating a new RaggedTensor from the
result.
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
indices = tf.ragged.constant([[0, 1], [2]])
depth = 3
tf.one_hot(indices, depth) # output: [2 x None x 3]
# [[[1., 0., 0.],
# [0., 1., 0.]],
# [[0., 0., 1.]]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match
      `dtype`.
    TypeError: If the dtypes of `on_value` and `off_value` don't match one
      another.
"""
with ops.name_scope(
name, "one_hot",
[indices, depth, on_value, off_value, axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (
ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists else None)
off_dtype = (
ops.convert_to_tensor(off_value).dtype.base_dtype
if off_exists else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
name)
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative type for the cast. Whenever maxlen fits into tf.int32, so do
    # the lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
@tf_export(v1=["squeeze"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`.
Must be specified if `input` is a `RaggedTensor`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "squeeze_dims",
squeeze_dims)
if np.isscalar(axis):
axis = [axis]
return gen_array_ops.squeeze(input, axis, name)
@tf_export("squeeze", v1=[])
@dispatch.add_dispatch_support
def squeeze_v2(input, axis=None, name=None):
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a
deprecated `squeeze_dims` argument.
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: The input cannot be converted to a tensor, or the specified
axis cannot be squeezed.
"""
# pylint: disable=redefined-builtin
return squeeze(input, axis, name)
@tf_export(v1=["where"])
@deprecation.deprecated(
date=None,
instructions="Use tf.where in 2.0, "
"which has the same broadcast rule as np.where")
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are tensors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
@tf_export("where", v1=["where_v2"])
def where_v2(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `condition`, `x` and `y` must be broadcastable to the same
shape.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which is of the same type as `y`, and may be broadcastable with
`condition` and `y`.
y: A Tensor which is of the same type as `x`, and may be broadcastable with
`condition` and `x`.
name: A name of the operation (optional).
Returns:
A `Tensor` with the same type as `x` and `y`, and shape that
is broadcast from `condition`, `x`, and `y`, if `x`, `y` are non-None.
    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(None,
"seq_dim is deprecated, use seq_axis instead",
"seq_dim")
@deprecation.deprecated_args(None,
"batch_dim is deprecated, use batch_axis instead",
"batch_dim")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
@tf_export("reverse_sequence", v1=[])
def reverse_sequence_v2(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None):
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0): # pylint: disable=g-doc-args
r"""Gather slices from params axis axis according to indices.
Gather slices from params axis `axis` according to `indices`. `indices` must
be an integer tensor of any dimension (usually 0-D or 1-D).
For 0-D (scalar) `indices`:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{5.1em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices, \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
For 1-D (vector) `indices` with `batch_dims=0`:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{2.6em}
> i, \hspace{2.6em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices[i], \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
In the general case, produces an output tensor where:
$$\begin{align*}
output[p_0, &..., p_{axis-1}, &
&i_{B}, ..., i_{M-1}, &
p_{axis + 1}, &..., p_{N-1}] = \\
params[p_0, &..., p_{axis-1}, &
indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}], &
p_{axis + 1}, &..., p_{N-1}]
\end{align*}$$
Where $$N$$=`ndims(params)`, $$M$$=`ndims(indices)`, and $$B$$=`batch_dims`.
Note that params.shape[:batch_dims] must be identical to
indices.shape[:batch_dims].
The shape of the output tensor is:
> `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
> params.shape[axis + 1:]`.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the corresponding
output value.
See also `tf.gather_nd`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
alt>
</div>
Args:
params: The `Tensor` from which to gather values. Must be at least rank
`axis + 1`.
indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
validate_indices: Deprecated, does nothing.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
batch_dims: An `integer`. The number of batch dimensions. Must be less
than `rank(indices)`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
"""
del validate_indices
if compat.forward_compatible(2019, 9, 10):
if axis is None:
axis = batch_dims
if axis != 0:
return gen_array_ops.gather_v2(
params, indices, axis, batch_dims=batch_dims, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(
params, indices, axis, name=name)
if batch_dims != 0:
with ops.name_scope(name, "Gather", [params, indices, axis]):
return _batch_gather(params, indices, batch_dims, axis)
if axis is None:
axis = batch_dims
if axis != 0:
# Note that we do a sparse_read here to avoid snapshotting the entire
# resource variable and doing a gather, which can be inefficient and lead to
# subtle race conditions. TODO(apassos) implement axis != 0 on sparse_read
return gen_array_ops.gather_v2(params, indices, axis, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables without
# introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
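# Illustrative sketch (assumes the public `tf.gather` API with small constant
# inputs):
#
#   params = tf.constant([[0, 1, 2],
#                         [3, 4, 5]])
#   tf.gather(params, [1, 0])           # rows:    [[3, 4, 5], [0, 1, 2]]
#   tf.gather(params, [2, 0], axis=1)   # columns: [[2, 0], [5, 3]]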
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
return gather(
params,
indices,
validate_indices=validate_indices,
name=name,
axis=axis,
batch_dims=batch_dims)
gather_v2.__doc__ = gather.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
"2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
"with `batch_dims=-1` instead.") # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
"""Gather slices from params according to indices with leading batch dims."""
with ops.name_scope(name, "BatchGather", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if indices.shape.ndims is None:
raise ValueError(
"batch_gather does not allow indices with unknown shape.")
return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
r"""Gather slices from params according to indices with leading batch dims.
This operation assumes that the leading `batch_dims` dimensions of `indices`
and `params` are batch dimensions; and performs a `tf.gather` operation within
each batch. (If `batch_dims` is not specified, then it defaults to
`rank(indices)-1`.) In the case in which `batch_dims==0`, this operation
is equivalent to `tf.gather`.
Args:
params: A Tensor. The tensor from which to gather values.
indices: A Tensor. Must be one of the following types: int32, int64. Index
tensor. Must be in range `[0, params.shape[batch_dims]]`.
batch_dims: An integer or none. The number of batch dimensions. Must be
less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
Returns:
A Tensor. Has the same type as `params`.
Raises:
ValueError: if `indices` has an unknown shape.
"""
if batch_dims is not None and not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
indices_ndims = indices.shape.ndims
if indices_ndims is None:
raise ValueError("tf.gather does not allow indices with unknown "
"rank when batch_dims is specified.")
if batch_dims is None:
batch_dims = indices_ndims - 1
if batch_dims < 0:
batch_dims += indices_ndims
if batch_dims < 0 or batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params.shape.ndims))
# Handle axis by transposing the axis dimension to be the first non-batch
# dimension, recursively calling batch_gather with axis=0, and then
# transposing the result to put the pre-axis dimensions before the indices
# dimensions.
if axis is not None and axis != batch_dims:
# Adjust axis to be positive.
if not isinstance(axis, int):
      axis = where_v2(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
else:
if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
raise ValueError("axis (%d) out of range [%d, %d)" %
(axis, -params.shape.ndims, params.shape.ndims))
if axis < 0:
axis += params.shape.ndims
if axis < batch_dims:
raise ValueError("batch_dims = %d must be less than or equal to "
"axis = %d" % (batch_dims, axis))
# Move params[axis] up to params[batch_dims].
perm = [
list(range(batch_dims)), [axis],
gen_math_ops._range(batch_dims, axis, 1),
gen_math_ops._range(axis + 1, rank(params), 1)
]
params = transpose(params, concat(perm, axis=0))
result = _batch_gather(params, indices, batch_dims=batch_dims)
# Move the result dimensions corresponding to params[batch_dims:axis]
# to just before the dimensions corresponding to indices[batch_dims:].
params_start = indices_ndims + axis - batch_dims
perm = [
list(range(batch_dims)),
gen_math_ops._range(indices_ndims, params_start, 1),
list(range(batch_dims, indices_ndims)),
gen_math_ops._range(params_start, rank(result), 1)
]
return transpose(result, perm=concat(perm, axis=0))
indices_shape = shape(indices)
params_shape = shape(params)
batch_indices = indices
indices_dtype = indices.dtype.base_dtype
accum_dim_value = ones((), dtype=indices_dtype)
# Use correct type for offset index computation
casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
for dim in range(batch_dims, 0, -1):
dim_value = casted_params_shape[dim - 1]
accum_dim_value *= casted_params_shape[dim]
start = zeros((), dtype=indices_dtype)
step = ones((), dtype=indices_dtype)
dim_indices = gen_math_ops._range(start, dim_value, step)
dim_indices *= accum_dim_value
dim_shape = stack(
[1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
batch_indices += reshape(dim_indices, dim_shape)
flat_indices = reshape(batch_indices, [-1])
outer_shape = params_shape[batch_dims + 1:]
flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
False)
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
axis=0))
flat_result = gather(flat_params, flat_indices)
result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
final_shape = indices.get_shape()[:batch_dims].merge_with(
params.get_shape()[:batch_dims])
final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
result.set_shape(final_shape)
return result
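# Illustrative sketch of the batched behaviour above (batch_dims=1): each row
# of `indices` gathers from the matching row of `params`:
#
#   params  = [[10, 11, 12],
#              [13, 14, 15]]
#   indices = [[2, 0],
#              [1, 1]]
#   _batch_gather(params, indices, batch_dims=1)
#   # ==> [[12, 10],
#   #      [14, 14]]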
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
r"""Gather slices from `params` into a Tensor with shape specified by `indices`.
`indices` is an K-dimensional integer tensor, best thought of as a
(K-1)-dimensional tensor of indices into `params`, where each element defines
a slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of
`params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements
(if `indices.shape[-1] == params.rank`) or slices
(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
Additionally both 'params' and 'indices' can have M leading batch
dimensions that exactly match. In this case 'batch_dims' must be M.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the
corresponding output value.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
The examples below are for the case when only indices have leading extra
dimensions. If both 'params' and 'indices' have leading batch dimensions, use
the 'batch_dims' parameter to run gather_nd in batch mode.
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
Examples with batched 'params' and 'indices':
```python
batch_dims = 1
indices = [[1], [0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
batch_dims = 1
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0']], [['a1', 'b1']]]
batch_dims = 1
indices = [[[1, 0]], [[0, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0'], ['b1']]
```
See also `tf.gather`.
Args:
params: A `Tensor`. The tensor from which to gather values.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor.
name: A name for the operation (optional).
    batch_dims: An integer or a scalar `Tensor`. The number of batch dimensions.
Returns:
A `Tensor`. Has the same type as `params`.
"""
batch_dims_ = tensor_util.constant_value(batch_dims)
if batch_dims_ is not None:
batch_dims = int(batch_dims_)
if batch_dims == 0:
if compat.forward_compatible(2019, 4, 29):
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.gather_nd(indices, name=name)
except AttributeError:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
return gather_nd(params, indices, name=name, batch_dims=batch_dims)
gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
"""gather_nd implementation with batch support."""
with ops.name_scope(name, "BatchGatherND", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
if batch_dims < 0:
raise ValueError("tf.gather_nd does not allow negative batch_dims.")
params_ndims = params.shape.ndims
indices_ndims = indices.shape.ndims
if indices_ndims is not None and batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params_ndims is not None and batch_dims >= params_ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params_ndims))
expand = batch_dims == 0
if expand:
# Normally gather_nd will be called when batch_dims == 0.
# But if this function is called with batch_dims = 0, e.g. for testing
# purposes, this adds a dummy batch dimension to make batch_dims = 1.
params = expand_dims(params, axis=0)
indices = expand_dims(indices, axis=0)
batch_dims = 1
params_shape = shape(params)
indices_shape = shape(indices)
batch_shape = params_shape[:batch_dims]
batch_size = gen_math_ops.prod(batch_shape, [0])
index_internal_ndims = rank(indices) - batch_dims - 1
indices_internal_shape = indices_shape[batch_dims:-1]
# Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
# with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
# 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
# to the entire 'params' tensor.
# Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
# grid of size B1 x B2.
batch_dim_list = unstack(batch_shape, axis=0)
dim_ranges = [
gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
for x in batch_dim_list
]
mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
# Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
index_grid = transpose(stack(flat_list, axis=0))
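    # For example, with batch_shape == [2, 3] the rows of `index_grid` are
    #   [0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]
    # i.e. one (b1, b2) coordinate per batch element (an illustrative sketch of
    # the values only; the actual tensor is computed at run time).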
# We need to concatenate these batch coordinates with the internal indices.
# concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
# So we reshape them both to [(B1.B2), i1, ..., iK, *]
index_grid_shape = shape(index_grid)
index_grid = reshape(
index_grid,
concat([
index_grid_shape[:1],
ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
],
axis=0))
tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
index_grid = tile(index_grid, multiples=tile_shape)
# index_grid now has shape [(B1.B2), i1, ..., iK, 2]
flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
flat_indices = reshape(indices, shape=flat_shape)
# flat_indices now has shape [(B1.B2), i1, ..., iK, C]
indices = concat((index_grid, flat_indices), axis=-1)
# indices has shape [(B1.B2), i1, ..., iK, 2+C]
out = gen_array_ops.gather_nd(params, indices)
# out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
# its original form.
out_shape = shape(out)
out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
if expand:
out = squeeze(out, axis=0)
return out
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
@tf_export(v1=["quantize_v2"])
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
"instead.") # pylint: disable=missing-docstring
def quantize_v2(
input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO"):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose tf.quantization.quantize instead of tf.quantize; we can
# deprecate tf.quantize in a future version of TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@deprecation.deprecated_endpoints("quantize")
def quantize(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name)
@tf_export("quantization.quantize_and_dequantize")
def quantize_and_dequantize(input, # pylint: disable=redefined-builtin
input_min,
input_max,
signed_input=True,
num_bits=8,
range_given=False,
round_mode="HALF_TO_EVEN",
name=None,
narrow_range=False):
"""Quantizes then dequantizes a tensor.
Args:
input: A `Tensor` to quantize and dequantize.
input_min: If range_given=True, the minimum input value that needs to be
represented in the quantized representation.
input_max: If range_given=True, the maximum input value that needs to be
represented in the quantized representation.
    signed_input: True if the quantization is signed, False if it is unsigned.
num_bits: The bitwidth of the quantization.
range_given: If true use `input_min` and `input_max` for the range of the
input, otherwise determine min and max from the input `Tensor`.
round_mode: Rounding mode when rounding from float values to quantized ones.
name: Optional name for the operation.
narrow_range: If true, then the absolute value of the quantized minimum
value is the same as the quantized maximum value, instead of 1 greater.
i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
Returns:
A `Tensor`. Each element is the result of quantizing and dequantizing the
corresponding element of `input`.
"""
return gen_array_ops.quantize_and_dequantize_v2(
input,
input_min=input_min,
input_max=input_max,
signed_input=signed_input,
num_bits=num_bits,
range_given=range_given,
round_mode=round_mode,
narrow_range=narrow_range,
name=name)
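# Illustrative usage sketch (assumes eager execution; the exact outputs depend
# on the bit width and rounding mode, so they are not asserted here):
#
#   x = tf.constant([-1.0, -0.5, 0.0, 0.3])
#   y = tf.quantization.quantize_and_dequantize(
#       x, input_min=-1.0, input_max=1.0, range_given=True, num_bits=8)
#   # `y` approximates `x` after a round trip through 8-bit quantization.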
@tf_export("searchsorted")
def searchsorted(sorted_sequence,
values,
side="left",
out_type=dtypes.int32,
name=None):
"""Searches input tensor for values on the innermost dimension.
A 2-D example:
```
sorted_sequence = [[0, 3, 9, 9, 10],
[1, 2, 3, 4, 5]]
values = [[2, 4, 9],
[0, 2, 6]]
result = searchsorted(sorted_sequence, values, side="left")
result == [[1, 2, 2],
[0, 1, 5]]
result = searchsorted(sorted_sequence, values, side="right")
result == [[1, 2, 4],
[0, 2, 5]]
```
Args:
sorted_sequence: N-D `Tensor` containing a sorted sequence.
values: N-D `Tensor` containing the search values.
side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
upper_bound.
out_type: The output type (`int32` or `int64`). Default is `tf.int32`.
name: Optional name for the operation.
Returns:
An N-D `Tensor` the size of values containing the result of applying either
lower_bound or upper_bound (depending on side) to each value. The result
is not a global index to the entire `Tensor`, but the index in the last
dimension.
Raises:
    ValueError: If the last dimension of `sorted_sequence` has more than
      `2^31 - 1` elements, if the total size of `values` exceeds `2^31 - 1`
      elements, or if the first `N-1` dimensions of the two tensors don't match.
"""
sequence_size = shape_internal(sorted_sequence)[-1]
values_size = shape_internal(values)[-1]
sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
values_2d = reshape(values, [-1, values_size])
if side == "right":
output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
name)
elif side == "left":
output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
name)
else:
raise ValueError("side must be either 'right' or 'left'. Saw: %s." % side)
return reshape(output, shape_internal(values))
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_patches")
def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
r"""Extract `patches` from `images`.
This op collects patches from the input image, as if applying a
convolution. All extracted patches are stacked in the depth (last) dimension
of the output.
Specifically, the op extracts patches of shape `sizes` which are `strides`
apart in the input image. The output is subsampled using the `rates` argument,
in the same manner as "atrous" or "dilated" convolutions.
The result is a 4D tensor which is indexed by batch, row, and column.
`output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`
which is taken from the input starting at
`images[i, x*strides[1], y*strides[2]]`.
Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where
`depth` is `images.shape[3]`.
The output elements are taken from the input at intervals given by the `rate`
argument, as in dilated convolutions.
The `padding` argument has no effect on the size of each patch, it determines
how many patches are extracted. If `VALID`, only patches which are fully
contained in the input image are included. If `SAME`, all patches whose
starting point is inside the input are included, and areas outside the input
default to zero.
Example:
```
n = 10
# images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100
images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]
# We generate two outputs as follows:
# 1. 3x3 patches with stride length 5
# 2. Same as above, but the rate is increased to 2
  tf.image.extract_patches(images=images,
                           sizes=[1, 3, 3, 1],
                           strides=[1, 5, 5, 1],
                           rates=[1, 1, 1, 1],
                           padding='VALID')
# Yields:
[[[[ 1 2 3 11 12 13 21 22 23]
[ 6 7 8 16 17 18 26 27 28]]
[[51 52 53 61 62 63 71 72 73]
[56 57 58 66 67 68 76 77 78]]]]
```
If we mark the pixels in the input image which are taken for the output with
`*`, we see the pattern:
```
* * * 4 5 * * * 9 10
* * * 14 15 * * * 19 20
* * * 24 25 * * * 29 30
31 32 33 34 35 36 37 38 39 40
41 42 43 44 45 46 47 48 49 50
* * * 54 55 * * * 59 60
* * * 64 65 * * * 69 70
* * * 74 75 * * * 79 80
81 82 83 84 85 86 87 88 89 90
91 92 93 94 95 96 97 98 99 100
```
```
  tf.image.extract_patches(images=images,
                           sizes=[1, 3, 3, 1],
                           strides=[1, 5, 5, 1],
                           rates=[1, 2, 2, 1],
                           padding='VALID')
# Yields:
[[[[ 1 3 5 21 23 25 41 43 45]
[ 6 8 10 26 28 30 46 48 50]]
[[ 51 53 55 71 73 75 91 93 95]
[ 56 58 60 76 78 80 96 98 100]]]]
```
We can again draw the effect, this time using the symbols `*`, `x`, `+` and
`o` to distinguish the patches:
```
* 2 * 4 * x 7 x 9 x
11 12 13 14 15 16 17 18 19 20
* 22 * 24 * x 27 x 29 x
31 32 33 34 35 36 37 38 39 40
* 42 * 44 * x 47 x 49 x
+ 52 + 54 + o 57 o 59 o
61 62 63 64 65 66 67 68 69 70
+ 72 + 74 + o 77 o 79 o
81 82 83 84 85 86 87 88 89 90
+ 92 + 94 + o 97 o 99 o
```
Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
sizes: The size of the extracted patches. Must
be [1, size_rows, size_cols, 1].
strides: A 1-D Tensor of length 4. How far the centers of two consecutive
patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
This is the input stride, specifying how far two consecutive patch samples
are in the input. Equivalent to extracting patches with `patch_sizes_eff =
patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
them spatially by a factor of `rates`. This is equivalent to `rate` in
dilated (a.k.a. Atrous) convolutions.
padding: The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A 4-D Tensor of the same type as the input.
"""
return gen_array_ops.extract_image_patches(images, sizes, strides, rates,
padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
"ksizes")
def extract_image_patches( # pylint: disable=missing-docstring
images,
ksizes=None,
strides=None,
rates=None,
padding=None,
name=None,
sizes=None):
ksizes = deprecation.deprecated_argument_lookup("sizes", sizes, "ksizes",
ksizes)
return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,
padding, name)
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
@tf_export("fingerprint")
def fingerprint(data, method="farmhash64", name=None):
r"""Generates fingerprint values.
Generates fingerprint values of `data`.
Fingerprint op considers the first dimension of `data` as the batch dimension,
and `output[i]` contains the fingerprint value generated from contents in
`data[i, ...]` for all `i`.
Fingerprint op writes fingerprint values as byte arrays. For example, the
default method `farmhash64` generates a 64-bit fingerprint value at a time.
This 8-byte value is written out as an `tf.uint8` array of size 8, in
little-endian order.
For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),
and that the fingerprint method is `farmhash64`. In this case, the output
shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the
size of each fingerprint value in bytes. `output[0, :]` is generated from
12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from
other 12 integers in `data[1, :, :]`.
Note that this op fingerprints the raw underlying buffer, and it does not
fingerprint Tensor's metadata such as data type and/or shape. For example, the
fingerprint values are invariant under reshapes and bitcasts as long as the
batch dimension remain the same:
```python
tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))
tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))
```
For string data, one should expect `tf.fingerprint(data) !=
  tf.fingerprint(tf.strings.reduce_join(data))` in general.
Args:
data: A `Tensor`. Must have rank 1 or higher.
method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.
Currently available method is `farmhash64`.
name: A name for the operation (optional).
Returns:
A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to
`data`'s first dimension, and the second dimension size depends on the
fingerprint algorithm.
"""
return gen_array_ops.fingerprint(data, method, name)
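# Illustrative usage sketch (not part of the original module); shapes follow
# the docstring above and eager execution is assumed.
#
#   data = tf.ones([2, 3, 4], dtype=tf.int32)
#   fp = tf.fingerprint(data)   # shape (2, 8), dtype tf.uint8
#   # Row i holds the farmhash64 fingerprint of data[i, :, :] as 8 bytes in
#   # little-endian order.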
|
the-stack_0_9542 | import random
import json
from sqlalchemy import Boolean
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.expression import cast
from dallinger.models import Info
from dallinger.models import Transformation
from dallinger.nodes import Agent
from dallinger.nodes import Source
class MCMCPAgent(Agent):
__mapper_args__ = {"polymorphic_identity": "MCMCP_agent"}
def update(self, infos):
info = infos[0]
self.replicate(info)
new_info = AnimalInfo(origin=self, contents=info.perturbed_contents())
Perturbation(info_in=info, info_out=new_info)
def _what(self):
infos = self.infos()
return [i for i in infos if i.chosen][0]
class AnimalSource(Source):
"""A source that transmits animal shapes."""
__mapper_args__ = {"polymorphic_identity": "animal_source"}
def create_information(self):
"""Create a new Info.
transmit() -> _what() -> create_information().
"""
return AnimalInfo(origin=self, contents=None)
class AnimalInfo(Info):
"""An Info that can be chosen."""
__mapper_args__ = {"polymorphic_identity": "vector_info"}
@hybrid_property
def chosen(self):
"""Use property1 to store whether an info was chosen."""
try:
return bool(self.property1)
except TypeError:
return None
@chosen.setter
def chosen(self, chosen):
"""Assign chosen to property1."""
self.property1 = repr(chosen)
@chosen.expression
def chosen(self):
"""Retrieve chosen via property1."""
return cast(self.property1, Boolean)
properties = {
"foot_spread": [0, 1],
"body_height": [0.1, 1.5],
"body_tilt": [-15, 45],
"tail_length": [0.05, 1.2],
"tail_angle": [-45, 190],
"neck_length": [0, 2.5],
"neck_angle": [90, 180],
"head_length": [0.05, 0.75],
"head_angle": [5, 80],
}
def __init__(self, origin, contents=None, **kwargs):
if contents is None:
data = {}
for prop, prop_range in self.properties.items():
data[prop] = random.uniform(prop_range[0], prop_range[1])
contents = json.dumps(data)
super(AnimalInfo, self).__init__(origin, contents, **kwargs)
def perturbed_contents(self):
"""Perturb the given animal."""
animal = json.loads(self.contents)
for prop, prop_range in self.properties.items():
            span = prop_range[1] - prop_range[0]
            jittered = animal[prop] + random.gauss(0, 0.1 * span)
animal[prop] = max(min(jittered, prop_range[1]), prop_range[0])
return json.dumps(animal)
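# Illustrative sketch (not part of the original experiment code) of one
# proposal step; `some_agent` is a hypothetical origin node.
#
#   info = AnimalInfo(origin=some_agent)          # random animal within ranges
#   proposal = AnimalInfo(origin=some_agent,
#                         contents=info.perturbed_contents())
#   # Each property is jittered with Gaussian noise (sd = 10% of its range)
#   # and clamped back into the allowed range, mirroring MCMCPAgent.update().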
class Perturbation(Transformation):
"""A perturbation is a transformation that perturbs the contents."""
__mapper_args__ = {"polymorphic_identity": "perturbation"}
|
the-stack_0_9543 | from __future__ import absolute_import
from __future__ import unicode_literals
try:
from collections import MutableSequence, MutableMapping
except ImportError:
from collections.abc import MutableSequence, MutableMapping
from collections import OrderedDict
import ast
from functools import reduce
from inflection import underscore
def reshape(schema, data):
reshaped = [] if isinstance(schema, MutableSequence) else OrderedDict()
def _reshape(schema, data, new_data):
if isinstance(schema, MutableMapping):
for idx, (key, value) in enumerate(schema.items()):
try:
d = data[key]
except KeyError:
continue
new_data[key] = (
[] if isinstance(value, MutableSequence) else {}
)
if not value:
new_data[key] = data[key]
else:
_reshape(value, d, new_data[key])
elif isinstance(schema, MutableSequence):
schema = schema[0]
for idx, datum in enumerate(data):
try:
new_data[idx]
except IndexError:
new_data.append({})
_reshape(schema, datum, new_data[idx])
else:
new_data[schema] = data[schema]
_reshape(schema, data, reshaped)
return reshaped
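# Illustrative example (not part of the original module): reshape() keeps only
# the keys named in the schema, preserving nesting; an empty schema value means
# "copy this key verbatim".
#
#   schema = {'id': {}, 'author': {'name': {}}}
#   data = {'id': 1, 'title': 'ignored', 'author': {'name': 'Ada', 'age': 36}}
#   reshape(schema, data)
#   # -> OrderedDict([('id', 1), ('author', {'name': 'Ada'})])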
def nargs_to_dict(nargs):
args = zip(nargs[0::2], nargs[1::2])
d = reduce(rec_nargs_to_dict, args, {})
return {'fields': d}
def rec_nargs_to_dict(accum, kv):
k, v = kv
keys = k.split('.')
if len(keys) > 1:
        accum[keys[0]] = rec_nargs_to_dict({}, ('.'.join(keys[1:]), v))
else:
accum[keys[0]] = eval_value(v)
return accum
def eval_value(value):
try:
return ast.literal_eval(value)
except (SyntaxError, ValueError):
return value
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = (
underscore(parent_key) +
sep +
underscore(k) if parent_key else underscore(k)
)
if isinstance(v, MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return OrderedDict(sorted(items))
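if __name__ == '__main__':
    # Illustrative demo (not part of the original module); the argument values
    # and field names below are made up.
    print(nargs_to_dict(['page.size', '10', 'sort', 'name']))
    # -> {'fields': {'page': {'size': 10}, 'sort': 'name'}}
    print(flatten({'userName': 'ada', 'homeAddress': {'zipCode': '12345'}}))
    # -> OrderedDict([('home_address_zip_code', '12345'), ('user_name', 'ada')])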
|
the-stack_0_9544 | # Copyright (c) 2018-2020, Manfred Moitzi
# License: MIT License
import pytest
from math import radians
import ezdxf
from ezdxf.math import Vector, BoundingBox
from ezdxf.render.forms import cube
from ezdxf.render.mesh import MeshVertexMerger, MeshBuilder, MeshTransformer, MeshAverageVertexMerger
from ezdxf.addons import SierpinskyPyramid
def test_vertex_merger_indices():
merger = MeshVertexMerger()
indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert indices == indices2
def test_vertex_merger_vertices():
merger = MeshVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.vertices == [(1, 2, 3), (4, 5, 6)]
def test_vertex_merger_index_of():
merger = MeshVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.index((1, 2, 3)) == 0
assert merger.index((4, 5, 6)) == 1
with pytest.raises(IndexError):
merger.index((7, 8, 9))
def test_average_vertex_merger_indices():
merger = MeshAverageVertexMerger()
indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert indices == indices2
def test_average_vertex_merger_vertices():
merger = MeshAverageVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.vertices == [(1, 2, 3), (4, 5, 6)]
def test_average_vertex_merger_index_of():
merger = MeshAverageVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.index((1, 2, 3)) == 0
assert merger.index((4, 5, 6)) == 1
with pytest.raises(IndexError):
merger.index((7, 8, 9))
def test_mesh_builder():
dwg = ezdxf.new('R2000')
pyramid = SierpinskyPyramid(level=4, sides=3)
pyramid.render(dwg.modelspace(), merge=False)
meshes = dwg.modelspace().query('MESH')
assert len(meshes) == 256
def test_vertex_merger():
pyramid = SierpinskyPyramid(level=4, sides=3)
faces = pyramid.faces()
mesh = MeshVertexMerger()
for vertices in pyramid:
mesh.add_mesh(vertices=vertices, faces=faces)
assert len(mesh.vertices) == 514
assert len(mesh.faces) == 1024
def test_average_vertex_merger():
pyramid = SierpinskyPyramid(level=4, sides=3)
faces = pyramid.faces()
mesh = MeshAverageVertexMerger()
for vertices in pyramid:
mesh.add_mesh(vertices=vertices, faces=faces)
assert len(mesh.vertices) == 514
assert len(mesh.faces) == 1024
REGULAR_FACE = Vector.list([(0, 0, 0), (1, 0, 1), (1, 1, 1), (0, 1, 0)])
IRREGULAR_FACE = Vector.list([(0, 0, 0), (1, 0, 1), (1, 1, 0), (0, 1, 0)])
def test_has_none_planar_faces():
mesh = MeshBuilder()
mesh.add_face(REGULAR_FACE)
assert mesh.has_none_planar_faces() is False
mesh.add_face(IRREGULAR_FACE)
assert mesh.has_none_planar_faces() is True
def test_scale_mesh():
mesh = cube(center=False)
mesh.scale(2, 3, 4)
bbox = BoundingBox(mesh.vertices)
assert bbox.extmin.isclose((0, 0, 0))
assert bbox.extmax.isclose((2, 3, 4))
def test_rotate_x():
mesh = cube(center=False)
mesh.rotate_x(radians(90))
bbox = BoundingBox(mesh.vertices)
assert bbox.extmin.isclose((0, -1, 0))
assert bbox.extmax.isclose((1, 0, 1))
@pytest.fixture(scope='module')
def msp():
doc = ezdxf.new()
return doc.modelspace()
@pytest.fixture(scope='module')
def cube_polyface(msp):
p = msp.add_polyface()
p.append_faces(cube().faces_as_vertices())
return p
def test_from_empty_polyface(msp):
empty_polyface = msp.add_polyface()
b = MeshBuilder.from_polyface(empty_polyface)
assert len(b.vertices) == 0
assert len(b.faces) == 0
def test_from_cube_polyface(cube_polyface):
b = MeshBuilder.from_polyface(cube_polyface)
assert len(b.vertices) == 24 # unoptimized mesh builder
assert len(b.faces) == 6
def test_render_polyface(cube_polyface):
doc = ezdxf.new()
msp = doc.modelspace()
t = MeshTransformer.from_polyface(cube_polyface)
assert len(t.vertices) == 24 # unoptimized mesh builder
assert len(t.faces) == 6
t.render_polyface(msp)
new_polyface = msp[-1]
assert new_polyface.dxftype() == 'POLYLINE'
assert new_polyface.is_poly_face_mesh is True
assert len(new_polyface.vertices) == 8 + 6
assert new_polyface.vertices[0] is not cube_polyface.vertices[0]
def test_from_polymesh(msp):
polymesh = msp.add_polymesh(size=(4, 4))
b = MeshBuilder.from_polyface(polymesh)
n = polymesh.dxf.n_count
m = polymesh.dxf.m_count
nfaces = (n - 1) * (m - 1)
assert len(b.vertices) == nfaces * 4 # unoptimized mesh builder
assert len(b.faces) == nfaces
def test_from_polyface_type_error(msp):
polyline = msp.add_polyline3d([(0, 0, 0), (1, 0, 0)])
with pytest.raises(TypeError):
MeshBuilder.from_polyface(polyline)
line = msp.add_line(start=(0, 0, 0), end=(1, 0, 0))
with pytest.raises(TypeError):
MeshBuilder.from_polyface(line)
|
the-stack_0_9545 | import datetime
from unittest import TestCase
from tarentsocialwall.WordpressConnector import WordpressConnector
class TestWordpressConnector(TestCase):
service = None
def setUp(self):
self.service = WordpressConnector()
def test_convert_to_socialpost_from_event_empty(self):
event = []
social_posts = [] # type: List[SocialPost]
self.service.convert_to_socialpost(event, social_posts)
def test_convert_to_socialpost_from_event_corectly(self):
self.service.access_token = "i have access"
event = {}
event['id'] = '123456'
event['title'] = {}
event['title']['rendered'] = 'test'
event['content'] = {}
event['content']['rendered'] = 'test'
event['date'] = datetime.datetime.now().strftime(self.service.wordpressDateFormat)
events = [event]
social_posts = []
self.service.convert_to_socialpost(events, social_posts)
self.assertTrue(len(social_posts) == 1)
social_post = social_posts[0]
externalId = social_post.externalId
self.assertTrue(externalId == '123456') |
the-stack_0_9546 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class MSDeploy(ProxyOnlyResource):
"""MSDeploy ARM PUT information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param package_uri: Package URI
:type package_uri: str
:param connection_string: SQL Connection String
:type connection_string: str
:param db_type: Database Type
:type db_type: str
:param set_parameters_xml_file_uri: URI of MSDeploy Parameters file. Must
not be set if SetParameters is used.
:type set_parameters_xml_file_uri: str
:param set_parameters: MSDeploy Parameters. Must not be set if
SetParametersXmlFileUri is used.
:type set_parameters: dict[str, str]
:param skip_app_data: Controls whether the MSDeploy operation skips the
App_Data directory.
If set to <code>true</code>, the existing App_Data directory on the
destination
will not be deleted, and any App_Data directory in the source will be
ignored.
Setting is <code>false</code> by default.
:type skip_app_data: bool
:param app_offline: Sets the AppOffline rule while the MSDeploy operation
executes.
Setting is <code>false</code> by default.
:type app_offline: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'package_uri': {'key': 'properties.packageUri', 'type': 'str'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'db_type': {'key': 'properties.dbType', 'type': 'str'},
'set_parameters_xml_file_uri': {'key': 'properties.setParametersXmlFileUri', 'type': 'str'},
'set_parameters': {'key': 'properties.setParameters', 'type': '{str}'},
'skip_app_data': {'key': 'properties.skipAppData', 'type': 'bool'},
'app_offline': {'key': 'properties.appOffline', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(MSDeploy, self).__init__(**kwargs)
self.package_uri = kwargs.get('package_uri', None)
self.connection_string = kwargs.get('connection_string', None)
self.db_type = kwargs.get('db_type', None)
self.set_parameters_xml_file_uri = kwargs.get('set_parameters_xml_file_uri', None)
self.set_parameters = kwargs.get('set_parameters', None)
self.skip_app_data = kwargs.get('skip_app_data', None)
self.app_offline = kwargs.get('app_offline', None)
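# Illustrative sketch (not part of the generated client code); the package URI
# below is hypothetical. The populated model is what gets passed to the web
# apps operations client when starting an MSDeploy deployment.
#
#   msdeploy = MSDeploy(
#       package_uri='https://example.com/artifacts/site.zip',
#       skip_app_data=True,
#       app_offline=True,
#   )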
|
the-stack_0_9547 | # SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# -*- mode: python; tab-width: 4 -*-
#
# Copyright (C) 2001 Michael Teo <[email protected]>
# nmb.py - NetBIOS library
#
# This software is provided 'as-is', without any express or implied warranty.
# In no event will the author be held liable for any damages arising from the
# use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice cannot be removed or altered from any source distribution.
#
# Altered source done by Alberto Solino (@agsolino)
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import errno
import re
import select
import socket
import string
import time
import random
from struct import pack, unpack
from six import byte2int
from .structure import Structure
# Our random number generator
try:
rand = random.SystemRandom()
except NotImplementedError:
rand = random
pass
################################################################################
# CONSTANTS
################################################################################
# Taken from socket module reference
INADDR_ANY = '0.0.0.0'
BROADCAST_ADDR = '<broadcast>'
# Default port for NetBIOS name service
NETBIOS_NS_PORT = 137
# Default port for NetBIOS session service
NETBIOS_SESSION_PORT = 139
# Default port for SMB session service
SMB_SESSION_PORT = 445
# Owner Node Type Constants
NODE_B = 0x0000
NODE_P = 0x2000
NODE_M = 0x4000
NODE_RESERVED = 0x6000
NODE_GROUP = 0x8000
NODE_UNIQUE = 0x0
# Name Type Constants
TYPE_UNKNOWN = 0x01
TYPE_WORKSTATION = 0x00
TYPE_CLIENT = 0x03
TYPE_SERVER = 0x20
TYPE_DOMAIN_MASTER = 0x1B
TYPE_DOMAIN_CONTROLLER = 0x1C
TYPE_MASTER_BROWSER = 0x1D
TYPE_BROWSER = 0x1E
TYPE_NETDDE = 0x1F
TYPE_STATUS = 0x21
# Opcodes values
OPCODE_QUERY = 0
OPCODE_REGISTRATION = 0x5 << 11
OPCODE_RELEASE = 0x6 << 11
OPCODE_WACK = 0x7 << 11
OPCODE_REFRESH = 0x8 << 11
OPCODE_REQUEST = 0 << 11
OPCODE_RESPONSE = 0x10 << 11
# NM_FLAGS
NM_FLAGS_BROADCAST = 0x1 << 4
NM_FLAGS_UNICAST = 0 << 4
NM_FLAGS_RA = 0x8 << 4
NM_FLAGS_RD = 0x10 << 4
NM_FLAGS_TC = 0x20 << 4
NM_FLAGS_AA = 0x40 << 4
# QUESTION_TYPE
QUESTION_TYPE_NB = 0x20 # NetBIOS general Name Service Resource Record
QUESTION_TYPE_NBSTAT = 0x21 # NetBIOS NODE STATUS Resource Record
# QUESTION_CLASS
QUESTION_CLASS_IN = 0x1 # Internet class
# RESOURCE RECORD RR_TYPE field definitions
RR_TYPE_A = 0x1 # IP address Resource Record
RR_TYPE_NS = 0x2 # Name Server Resource Record
RR_TYPE_NULL = 0xA # NULL Resource Record
RR_TYPE_NB = 0x20 # NetBIOS general Name Service Resource Record
RR_TYPE_NBSTAT = 0x21 # NetBIOS NODE STATUS Resource Record
# RESOURCE RECORD RR_CLASS field definitions
RR_CLASS_IN = 1 # Internet class
# RCODE values
RCODE_FMT_ERR = 0x1 # Format Error. Request was invalidly formatted.
RCODE_SRV_ERR = 0x2 # Server failure. Problem with NBNS, cannot process name.
RCODE_IMP_ERR = 0x4 # Unsupported request error. Allowable only for challenging NBNS when gets an Update type
# registration request.
RCODE_RFS_ERR = 0x5 # Refused error. For policy reasons server will not register this name from this host.
RCODE_ACT_ERR = 0x6 # Active error. Name is owned by another node.
RCODE_CFT_ERR = 0x7 # Name in conflict error. A UNIQUE name is owned by more than one node.
# NAME_FLAGS
NAME_FLAGS_PRM = 0x0200 # Permanent Name Flag. If one (1) then entry is for the permanent node name. Flag is zero
# (0) for all other names.
NAME_FLAGS_ACT = 0x0400 # Active Name Flag. All entries have this flag set to one (1).
NAME_FLAG_CNF = 0x0800 # Conflict Flag. If one (1) then name on this node is in conflict.
NAME_FLAG_DRG = 0x1000 # Deregister Flag. If one (1) then this name is in the process of being deleted.
# NB_FLAGS
NB_FLAGS_ONT_B = 0
NB_FLAGS_ONT_P = 1 << 13
NB_FLAGS_ONT_M = 2 << 13
NB_FLAGS_G = 1 << 15
NAME_TYPES = {TYPE_UNKNOWN: 'Unknown', TYPE_WORKSTATION: 'Workstation', TYPE_CLIENT: 'Client',
TYPE_SERVER: 'Server', TYPE_DOMAIN_MASTER: 'Domain Master', TYPE_DOMAIN_CONTROLLER: 'Domain Controller',
TYPE_MASTER_BROWSER: 'Master Browser', TYPE_BROWSER: 'Browser Server', TYPE_NETDDE: 'NetDDE Server',
TYPE_STATUS: 'Status'}
# NetBIOS Session Types
NETBIOS_SESSION_MESSAGE = 0x0
NETBIOS_SESSION_REQUEST = 0x81
NETBIOS_SESSION_POSITIVE_RESPONSE = 0x82
NETBIOS_SESSION_NEGATIVE_RESPONSE = 0x83
NETBIOS_SESSION_RETARGET_RESPONSE = 0x84
NETBIOS_SESSION_KEEP_ALIVE = 0x85
################################################################################
# HELPERS
################################################################################
def encode_name(name, type, scope):
# ToDo: Rewrite this simpler, we're using less than written
"""
Perform first and second level encoding of name as specified in RFC 1001 (Section 4)
:param string name: the name to encode
:param integer type: the name type constants
:param string scope: the name's scope
:return string: the encoded name.
"""
if name == '*':
name += '\0' * 15
elif len(name) > 15:
name = name[:15] + chr(type)
else:
name = name.ljust(15) + chr(type)
encoded_name = chr(len(name) * 2) + re.sub('.', _do_first_level_encoding, name)
if scope:
encoded_scope = ''
        for s in scope.split('.'):
encoded_scope = encoded_scope + chr(len(s)) + s
return (encoded_name + encoded_scope) + '\0'
else:
return encoded_name + '\0'
# Internal method for use in encode_name()
def _do_first_level_encoding(m):
s = ord(m.group(0))
return string.ascii_uppercase[s >> 4] + string.ascii_uppercase[s & 0x0f]
def decode_name(name):
# ToDo: Rewrite this simpler, we're using less than written
"""
Perform first and second level decoding of name as specified in RFC 1001 (Section 4)
:param string name: the name to dencode
:return string: the decoded name.
"""
name_length = ord(name[0])
assert name_length == 32
decoded_name = re.sub('..', _do_first_level_decoding, name[1:33])
if name[33] == '\0':
return 34, decoded_name, ''
else:
decoded_domain = ''
offset = 34
while 1:
domain_length = byte2int(name[offset:offset+1])
if domain_length == 0:
break
decoded_domain = '.' + name[offset:offset + domain_length]
offset += domain_length
return offset + 1, decoded_name, decoded_domain
def _do_first_level_decoding(m):
s = m.group(0)
return chr(((ord(s[0]) - ord('A')) << 4) | (ord(s[1]) - ord('A')))
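# Illustrative round trip (not part of the original library): first- and
# second-level NetBIOS name encoding as used by the name and session services
# below. The host name is hypothetical.
#
#   encoded = encode_name('FILESERVER', TYPE_SERVER, None)
#   # 34-byte label: length byte + 32 half-ASCII characters + trailing '\x00'
#   offset, name, scope = decode_name(encoded)
#   # name is 'FILESERVER' padded to 15 characters plus the decoded type byte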
ERRCLASS_QUERY = 0x00
ERRCLASS_SESSION = 0xf0
ERRCLASS_OS = 0xff
QUERY_ERRORS = {0x01: 'Format Error. Request was invalidly formatted',
0x02: 'Server failure. Problem with NBNS, cannot process name.',
0x03: 'Name does not exist',
0x04: 'Unsupported request error. Allowable only for challenging NBNS when gets an Update type registration request.',
0x05: 'Refused error. For policy reasons server will not register this name from this host.',
0x06: 'Active error. Name is owned by another node.',
0x07: 'Name in conflict error. A UNIQUE name is owned by more than one node.',
}
SESSION_ERRORS = {0x80: 'Not listening on called name',
0x81: 'Not listening for calling name',
0x82: 'Called name not present',
0x83: 'Sufficient resources',
0x8f: 'Unspecified error'
}
class NetBIOSError(Exception):
def __init__(self, error_message='', error_class=None, error_code=None):
self.error_class = error_class
self.error_code = error_code
self.error_msg = error_message
def get_error_code(self):
        return self.error_code
def getErrorCode(self):
return self.get_error_code()
def get_error_string(self):
return str(self)
def getErrorString(self):
return str(self)
def __str__(self):
if self.error_code is not None:
if self.error_code in QUERY_ERRORS:
return '%s-%s(%s)' % (self.error_msg, QUERY_ERRORS[self.error_code], self.error_code)
elif self.error_code in SESSION_ERRORS:
return '%s-%s(%s)' % (self.error_msg, SESSION_ERRORS[self.error_code], self.error_code)
else:
return '%s(%s)' % (self.error_msg, self.error_code)
else:
return '%s' % self.error_msg
class NetBIOSTimeout(Exception):
def __init__(self, message = 'The NETBIOS connection with the remote host timed out.'):
Exception.__init__(self, message)
################################################################################
# 4.2 NAME SERVER PACKETS
################################################################################
class NBNSResourceRecord(Structure):
structure = (
('RR_NAME','z=\x00'),
('RR_TYPE','>H=0'),
('RR_CLASS','>H=0'),
('TTL','>L=0'),
('RDLENGTH','>H-RDATA'),
('RDATA',':=""'),
)
class NBNodeStatusResponse(NBNSResourceRecord):
def __init__(self, data = 0):
NBNSResourceRecord.__init__(self, data)
self.mac = b'00-00-00-00-00-00'
self.num_names = unpack('B', self['RDATA'][:1])[0]
self.entries = list()
data = self['RDATA'][1:]
for _ in range(self.num_names):
entry = NODE_NAME_ENTRY(data)
data = data[len(entry):]
self.entries.append(entry)
self.statistics = STATISTICS(data)
self.set_mac_in_hexa(self.statistics['UNIT_ID'])
def set_mac_in_hexa(self, data):
data_aux = u''
for d in bytearray(data):
if data_aux == '':
data_aux = '%02x' % d
else:
data_aux += '-%02x' % d
self.mac = data_aux.upper()
def get_mac(self):
return self.mac
    def rawData(self):
        res = pack('!B', self.num_names )
        for i in range(0, self.num_names):
            res += self.entries[i].getData()
        return res
class NBPositiveNameQueryResponse(NBNSResourceRecord):
def __init__(self, data = 0):
NBNSResourceRecord.__init__(self, data)
self.entries = [ ]
rdata = self['RDATA']
while len(rdata) > 0:
entry = ADDR_ENTRY(rdata)
rdata = rdata[len(entry):]
self.entries.append(socket.inet_ntoa(entry['NB_ADDRESS']))
# 4.2.1. GENERAL FORMAT OF NAME SERVICE PACKETS
class NAME_SERVICE_PACKET(Structure):
commonHdr = (
('NAME_TRN_ID','>H=0'),
('FLAGS','>H=0'),
('QDCOUNT','>H=0'),
('ANCOUNT','>H=0'),
('NSCOUNT','>H=0'),
('ARCOUNT','>H=0'),
)
structure = (
('ANSWERS',':'),
)
# 4.2.1.2. QUESTION SECTION
class QUESTION_ENTRY(Structure):
commonHdr = (
('QUESTION_NAME','z'),
('QUESTION_TYPE','>H=0'),
('QUESTION_CLASS','>H=0'),
)
# 4.2.1.3. RESOURCE RECORD
class RESOURCE_RECORD(Structure):
structure = (
('RR_NAME','z=\x00'),
('RR_TYPE','>H=0'),
('RR_CLASS','>H=0'),
('TTL','>L=0'),
('RDLENGTH','>H-RDATA'),
('RDATA',':=""'),
)
# 4.2.2. NAME REGISTRATION REQUEST
class NAME_REGISTRATION_REQUEST(NAME_SERVICE_PACKET):
structure = (
('QUESTION_NAME', 'z'),
('QUESTION_TYPE', '>H=0'),
('QUESTION_CLASS', '>H=0'),
('RR_NAME','z', ),
('RR_TYPE', '>H=0'),
('RR_CLASS','>H=0'),
('TTL', '>L=0'),
('RDLENGTH', '>H=6'),
('NB_FLAGS', '>H=0'),
('NB_ADDRESS', '4s=""'),
)
def __init__(self, data=None):
NAME_SERVICE_PACKET.__init__(self,data)
self['FLAGS'] = OPCODE_REQUEST | NM_FLAGS_RD | OPCODE_REGISTRATION
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 1
self['QUESTION_TYPE'] = QUESTION_TYPE_NB
self['QUESTION_CLASS'] = QUESTION_CLASS_IN
self['RR_TYPE'] = RR_TYPE_NB
self['RR_CLASS'] = RR_CLASS_IN
# 4.2.3. NAME OVERWRITE REQUEST & DEMAND
class NAME_OVERWRITE_REQUEST(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
self['FLAGS'] = OPCODE_REQUEST | OPCODE_REGISTRATION
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 1
# 4.2.4. NAME REFRESH REQUEST
class NAME_REFRESH_REQUEST(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
self['FLAGS'] = OPCODE_REFRESH | 0x1
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 1
# 4.2.5. POSITIVE NAME REGISTRATION RESPONSE
# 4.2.6. NEGATIVE NAME REGISTRATION RESPONSE
# 4.2.7. END-NODE CHALLENGE REGISTRATION RESPONSE
class NAME_REGISTRATION_RESPONSE(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
# 4.2.8. NAME CONFLICT DEMAND
class NAME_CONFLICT_DEMAND(NAME_REGISTRATION_REQUEST):
def __init__(self, data=None):
NAME_REGISTRATION_REQUEST.__init__(self,data)
# ToDo: 4.2.9. NAME RELEASE REQUEST & DEMAND
# ToDo: 4.2.10. POSITIVE NAME RELEASE RESPONSE
# ToDo: 4.2.11. NEGATIVE NAME RELEASE RESPONSE
# 4.2.12. NAME QUERY REQUEST
class NAME_QUERY_REQUEST(NAME_SERVICE_PACKET):
structure = (
('QUESTION_NAME', 'z'),
('QUESTION_TYPE', '>H=0'),
('QUESTION_CLASS', '>H=0'),
)
def __init__(self, data=None):
NAME_SERVICE_PACKET.__init__(self,data)
self['FLAGS'] = OPCODE_REQUEST | OPCODE_REGISTRATION | NM_FLAGS_RD
self['RCODE'] = 0
self['QDCOUNT'] = 1
self['ANCOUNT'] = 0
self['NSCOUNT'] = 0
self['ARCOUNT'] = 0
self['QUESTION_TYPE'] = QUESTION_TYPE_NB
self['QUESTION_CLASS'] = QUESTION_CLASS_IN
# 4.2.13. POSITIVE NAME QUERY RESPONSE
class ADDR_ENTRY(Structure):
structure = (
('NB_FLAGS', '>H=0'),
('NB_ADDRESS', '4s=""'),
)
# ToDo: 4.2.15. REDIRECT NAME QUERY RESPONSE
# ToDo: 4.2.16. WAIT FOR ACKNOWLEDGEMENT (WACK) RESPONSE
# 4.2.17. NODE STATUS REQUEST
class NODE_STATUS_REQUEST(NAME_QUERY_REQUEST):
def __init__(self, data=None):
NAME_QUERY_REQUEST.__init__(self,data)
self['FLAGS'] = 0
self['QUESTION_TYPE'] = QUESTION_TYPE_NBSTAT
# 4.2.18. NODE STATUS RESPONSE
class NODE_NAME_ENTRY(Structure):
structure = (
('NAME','15s=""'),
('TYPE','B=0'),
('NAME_FLAGS','>H'),
)
class STATISTICS(Structure):
structure = (
('UNIT_ID','6s=""'),
('JUMPERS','B'),
('TEST_RESULT','B'),
('VERSION_NUMBER','>H'),
('PERIOD_OF_STATISTICS','>H'),
('NUMBER_OF_CRCs','>H'),
('NUMBER_ALIGNMENT_ERRORS','>H'),
('NUMBER_OF_COLLISIONS','>H'),
('NUMBER_SEND_ABORTS','>H'),
('NUMBER_GOOD_SENDS','>L'),
('NUMBER_GOOD_RECEIVES','>L'),
('NUMBER_RETRANSMITS','>H'),
('NUMBER_NO_RESOURCE_CONDITIONS','>H'),
('NUMBER_FREE_COMMAND_BLOCKS','>H'),
('TOTAL_NUMBER_COMMAND_BLOCKS','>H'),
('MAX_TOTAL_NUMBER_COMMAND_BLOCKS','>H'),
('NUMBER_PENDING_SESSIONS','>H'),
('MAX_NUMBER_PENDING_SESSIONS','>H'),
('MAX_TOTAL_SESSIONS_POSSIBLE','>H'),
('SESSION_DATA_PACKET_SIZE','>H'),
)
class NetBIOS:
# Creates a NetBIOS instance without specifying any default NetBIOS domain nameserver.
# All queries will be sent through the servport.
def __init__(self, servport = NETBIOS_NS_PORT):
self.__servport = NETBIOS_NS_PORT
self.__nameserver = None
self.__broadcastaddr = BROADCAST_ADDR
self.mac = b'00-00-00-00-00-00'
def _setup_connection(self, dstaddr, timeout=None):
port = rand.randint(10000, 60000)
af, socktype, proto, _canonname, _sa = socket.getaddrinfo(dstaddr, port, socket.AF_INET, socket.SOCK_DGRAM)[0]
s = socket.socket(af, socktype, proto)
        has_bind = False
        for _i in range(0, 10):
            # We try to bind to a port for 10 tries
            try:
                s.bind((INADDR_ANY, rand.randint(10000, 60000)))
                s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                has_bind = True
                break
            except socket.error:
                continue
        if not has_bind:
            raise NetBIOSError('Cannot bind to a good UDP port', ERRCLASS_OS, errno.EAGAIN)
self.__sock = s
def send(self, request, destaddr, timeout):
self._setup_connection(destaddr)
tries = 3
while 1:
try:
self.__sock.sendto(request.getData(), 0, (destaddr, self.__servport))
ready, _, _ = select.select([self.__sock.fileno()], [], [], timeout)
if not ready:
if tries:
# Retry again until tries == 0
tries -= 1
else:
raise NetBIOSTimeout
else:
try:
data, _ = self.__sock.recvfrom(65536, 0)
except Exception as e:
raise NetBIOSError("recvfrom error: %s" % str(e))
self.__sock.close()
res = NAME_SERVICE_PACKET(data)
if res['NAME_TRN_ID'] == request['NAME_TRN_ID']:
if (res['FLAGS'] & 0xf) > 0:
raise NetBIOSError('Negative response', ERRCLASS_QUERY, res['FLAGS'] & 0xf)
return res
except select.error as ex:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
raise NetBIOSError('Error occurs while waiting for response', ERRCLASS_OS, ex[0])
except socket.error as ex:
raise NetBIOSError('Connection error: %s' % str(ex))
# Set the default NetBIOS domain nameserver.
def set_nameserver(self, nameserver):
self.__nameserver = nameserver
# Return the default NetBIOS domain nameserver, or None if none is specified.
def get_nameserver(self):
return self.__nameserver
# Set the broadcast address to be used for query.
def set_broadcastaddr(self, broadcastaddr):
self.__broadcastaddr = broadcastaddr
# Return the broadcast address to be used, or BROADCAST_ADDR if default broadcast address is used.
def get_broadcastaddr(self):
return self.__broadcastaddr
# Returns a NBPositiveNameQueryResponse instance containing the host information for nbname.
# If a NetBIOS domain nameserver has been specified, it will be used for the query.
# Otherwise, the query is broadcasted on the broadcast address.
def gethostbyname(self, nbname, qtype = TYPE_WORKSTATION, scope = None, timeout = 1):
resp = self.name_query_request(nbname, self.__nameserver, qtype, scope, timeout)
return resp
# Returns a list of NBNodeEntry instances containing node status information for nbname.
# If destaddr contains an IP address, then this will become an unicast query on the destaddr.
# Raises NetBIOSTimeout if timeout (in secs) is reached.
# Raises NetBIOSError for other errors
def getnodestatus(self, nbname, destaddr = None, type = TYPE_WORKSTATION, scope = None, timeout = 1):
if destaddr:
return self.node_status_request(nbname, destaddr, type, scope, timeout)
else:
return self.node_status_request(nbname, self.__nameserver, type, scope, timeout)
def getnetbiosname(self, ip):
entries = self.getnodestatus('*',ip)
entries = [x for x in entries if x['TYPE'] == TYPE_SERVER]
return entries[0]['NAME'].strip().decode('latin-1')
def getmacaddress(self):
return self.mac
def name_registration_request(self, nbname, destaddr, qtype, scope, nb_flags=0, nb_address='0.0.0.0'):
netbios_name = nbname.upper()
qn_label = encode_name(netbios_name, qtype, scope)
p = NAME_REGISTRATION_REQUEST()
p['NAME_TRN_ID'] = rand.randint(1, 32000)
p['QUESTION_NAME'] = qn_label[:-1]
p['RR_NAME'] = qn_label[:-1]
p['TTL'] = 0xffff
p['NB_FLAGS'] = nb_flags
p['NB_ADDRESS'] = socket.inet_aton(nb_address)
if not destaddr:
p['FLAGS'] |= NM_FLAGS_BROADCAST
destaddr = self.__broadcastaddr
req = p.getData()
res = self.send(p, destaddr, 1)
return res
def name_query_request(self, nbname, destaddr = None, qtype = TYPE_SERVER, scope = None, timeout = 1):
netbios_name = nbname.upper()
qn_label = encode_name(netbios_name, qtype, scope)
p = NAME_QUERY_REQUEST()
p['NAME_TRN_ID'] = rand.randint(1, 32000)
p['QUESTION_NAME'] = qn_label[:-1]
p['FLAGS'] = NM_FLAGS_RD
if not destaddr:
p['FLAGS'] |= NM_FLAGS_BROADCAST
destaddr = self.__broadcastaddr
req = p.getData()
res = self.send(p, destaddr, timeout)
return NBPositiveNameQueryResponse(res['ANSWERS'])
def node_status_request(self, nbname, destaddr, type, scope, timeout):
netbios_name = nbname.upper()
qn_label = encode_name(netbios_name, type, scope)
p = NODE_STATUS_REQUEST()
p['NAME_TRN_ID'] = rand.randint(1, 32000)
p['QUESTION_NAME'] = qn_label[:-1]
if not destaddr:
p['FLAGS'] = NM_FLAGS_BROADCAST
destaddr = self.__broadcastaddr
res = self.send(p, destaddr, timeout)
answ = NBNodeStatusResponse(res['ANSWERS'])
self.mac = answ.get_mac()
return answ.entries
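# Illustrative usage sketch (not part of the original library); the target IP
# address is hypothetical and the queries go over UDP port 137.
#
#   nb = NetBIOS()
#   server_name = nb.getnetbiosname('192.168.1.10')   # e.g. 'FILESERVER'
#   entries = nb.getnodestatus('*', '192.168.1.10')   # NODE_NAME_ENTRY list
#   mac = nb.getmacaddress()                          # e.g. '00-0C-29-AA-BB-CC'
#   addrs = nb.gethostbyname(server_name, qtype=TYPE_SERVER).entries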
################################################################################
# 4.2 SESSION SERVICE PACKETS
################################################################################
class NetBIOSSessionPacket:
def __init__(self, data=0):
self.type = 0x0
self.flags = 0x0
self.length = 0x0
if data == 0:
self._trailer = b''
else:
try:
                self.type = ord(data[0:1])
                if self.type == NETBIOS_SESSION_MESSAGE:
                    self.length = ord(data[1:2]) << 16 | (unpack('!H', data[2:4])[0])
                else:
                    self.flags = ord(data[1:2])
                    self.length = unpack('!H', data[2:4])[0]
self._trailer = data[4:]
except:
raise NetBIOSError('Wrong packet format ')
def set_type(self, type):
self.type = type
def get_type(self):
return self.type
def rawData(self):
if self.type == NETBIOS_SESSION_MESSAGE:
data = pack('!BBH', self.type, self.length >> 16, self.length & 0xFFFF) + self._trailer
else:
data = pack('!BBH', self.type, self.flags, self.length) + self._trailer
return data
def set_trailer(self, data):
self._trailer = data
self.length = len(data)
def get_length(self):
return self.length
def get_trailer(self):
return self._trailer
class NetBIOSSession:
def __init__(self, myname, remote_name, remote_host, remote_type=TYPE_SERVER, sess_port=NETBIOS_SESSION_PORT,
timeout=None, local_type=TYPE_WORKSTATION, sock=None):
"""
:param unicode myname: My local NetBIOS name
:param unicode remote_name: Remote NetBIOS name
:param unicode remote_host: Remote IP Address
:param integer remote_type: NetBIOS Host type
:param integer sess_port: Session port to connect (139,445)
:param integer timeout: Timeout for connection
:param integer local_type: My Local Host Type
:param socket sock: Socket for already established connection
"""
if len(myname) > 15:
            self.__myname = myname[:15].upper()
        else:
            self.__myname = myname.upper()
self.__local_type = local_type
assert remote_name
# if destination port SMB_SESSION_PORT and remote name *SMBSERVER, we're changing it to its IP address
# helping solving the client mistake ;)
if remote_name == '*SMBSERVER' and sess_port == SMB_SESSION_PORT:
remote_name = remote_host
# If remote name is *SMBSERVER let's try to query its name.. if can't be guessed, continue and hope for the best
if remote_name == '*SMBSERVER':
nb = NetBIOS()
try:
res = nb.getnetbiosname(remote_host)
except:
res = None
pass
if res is not None:
remote_name = res
if len(remote_name) > 15:
            self.__remote_name = remote_name[:15].upper()
        else:
            self.__remote_name = remote_name.upper()
self.__remote_type = remote_type
self.__remote_host = remote_host
if sock is not None:
# We are acting as a server
self._sock = sock
else:
self._sock = self._setup_connection((remote_host, sess_port), timeout)
if sess_port == NETBIOS_SESSION_PORT:
self._request_session(remote_type, local_type, timeout)
def _request_session(self, remote_type, local_type, timeout):
raise NotImplementedError('Not Implemented!')
def _setup_connection(self, peer, timeout=None):
raise NotImplementedError('Not Implemented!')
def get_myname(self):
return self.__myname
def get_mytype(self):
return self.__local_type
def get_remote_host(self):
return self.__remote_host
def get_remote_name(self):
return self.__remote_name
def get_remote_type(self):
return self.__remote_type
def close(self):
self._sock.close()
def get_socket(self):
return self._sock
class NetBIOSUDPSessionPacket(Structure):
TYPE_DIRECT_UNIQUE = 16
TYPE_DIRECT_GROUP = 17
FLAGS_MORE_FRAGMENTS = 1
FLAGS_FIRST_FRAGMENT = 2
FLAGS_B_NODE = 0
structure = (
('Type','B=16'), # Direct Unique Datagram
('Flags','B=2'), # FLAGS_FIRST_FRAGMENT
('ID','<H'),
('_SourceIP','>L'),
('SourceIP','"'),
('SourcePort','>H=138'),
('DataLegth','>H-Data'),
('Offset','>H=0'),
('SourceName','z'),
('DestinationName','z'),
('Data',':'),
)
def getData(self):
addr = self['SourceIP'].split('.')
addr = [int(x) for x in addr]
addr = (((addr[0] << 8) + addr[1] << 8) + addr[2] << 8) + addr[3]
self['_SourceIP'] = addr
return Structure.getData(self)
def get_trailer(self):
return self['Data']
class NetBIOSUDPSession(NetBIOSSession):
def _setup_connection(self, peer, timeout=None):
af, socktype, proto, canonname, sa = socket.getaddrinfo(peer[0], peer[1], 0, socket.SOCK_DGRAM)[0]
sock = socket.socket(af, socktype, proto)
sock.connect(sa)
sock = socket.socket(af, socktype, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((INADDR_ANY, 138))
self.peer = peer
return sock
def _request_session(self, remote_type, local_type, timeout = None):
pass
def next_id(self):
if hasattr(self, '__dgram_id'):
answer = self.__dgram_id
else:
self.__dgram_id = rand.randint(1,65535)
answer = self.__dgram_id
self.__dgram_id += 1
return answer
def send_packet(self, data):
# Yes... I know...
self._sock.connect(self.peer)
p = NetBIOSUDPSessionPacket()
p['ID'] = self.next_id()
p['SourceIP'] = self._sock.getsockname()[0]
p['SourceName'] = encode_name(self.get_myname(), self.get_mytype(), '')[:-1]
p['DestinationName'] = encode_name(self.get_remote_name(), self.get_remote_type(), '')[:-1]
p['Data'] = data
        self._sock.sendto(p.getData(), self.peer)
self._sock.close()
self._sock = self._setup_connection(self.peer)
def recv_packet(self, timeout = None):
# The next loop is a workaround for a bigger problem:
# When data reaches higher layers, the lower headers are lost,
# and with them, for example, the source IP. Hence, SMB users
# can't know where packets are coming from... we need a better
# solution, right now, we will filter everything except packets
# coming from the remote_host specified in __init__()
while 1:
data, peer = self._sock.recvfrom(8192)
# print "peer: %r self.peer: %r" % (peer, self.peer)
if peer == self.peer: break
return NetBIOSUDPSessionPacket(data)
class NetBIOSTCPSession(NetBIOSSession):
def __init__(self, myname, remote_name, remote_host, remote_type=TYPE_SERVER, sess_port=NETBIOS_SESSION_PORT,
timeout=None, local_type=TYPE_WORKSTATION, sock=None, select_poll=False):
"""
:param unicode myname: My local NetBIOS name
:param unicode remote_name: Remote NetBIOS name
:param unicode remote_host: Remote IP Address
:param integer remote_type: NetBIOS Host type
:param integer sess_port: Session port to connect (139,445)
:param integer timeout: Timeout for connection
:param integer local_type: My Local Host Type
:param socket sock: Socket for already established connection
:param boolean select_poll: Type of polling mechanism
"""
self.__select_poll = select_poll
if self.__select_poll:
self.read_function = self.polling_read
else:
self.read_function = self.non_polling_read
NetBIOSSession.__init__(self, myname, remote_name, remote_host, remote_type=remote_type, sess_port=sess_port,
timeout=timeout, local_type=local_type, sock=sock)
def _setup_connection(self, peer, timeout=None):
try:
af, socktype, proto, canonname, sa = socket.getaddrinfo(peer[0], peer[1], 0, socket.SOCK_STREAM)[0]
sock = socket.socket(af, socktype, proto)
oldtimeout = sock.gettimeout()
sock.settimeout(timeout)
sock.connect(sa)
sock.settimeout(oldtimeout)
except socket.error as e:
raise socket.error("Connection error (%s:%s)" % (peer[0], peer[1]), e)
return sock
def send_packet(self, data):
p = NetBIOSSessionPacket()
p.set_type(NETBIOS_SESSION_MESSAGE)
p.set_trailer(data)
self._sock.sendall(p.rawData())
def recv_packet(self, timeout = None):
data = self.__read(timeout)
return NetBIOSSessionPacket(data)
def _request_session(self, remote_type, local_type, timeout = None):
p = NetBIOSSessionPacket()
remote_name = encode_name(self.get_remote_name(), remote_type, '')
myname = encode_name(self.get_myname(), local_type, '')
p.set_type(NETBIOS_SESSION_REQUEST)
p.set_trailer(remote_name.encode('latin-1') + myname.encode('latin-1'))
self._sock.sendall(p.rawData())
while 1:
p = self.recv_packet(timeout)
if p.get_type() == NETBIOS_SESSION_NEGATIVE_RESPONSE:
raise NetBIOSError('Cannot request session (Called Name:%s)' % self.get_remote_name())
elif p.get_type() == NETBIOS_SESSION_POSITIVE_RESPONSE:
break
else:
# Ignore all other messages, most probably keepalive messages
pass
def polling_read(self, read_length, timeout):
data = b''
if timeout is None:
timeout = 3600
time_left = timeout
CHUNK_TIME = 0.025
bytes_left = read_length
while bytes_left > 0:
try:
ready, _, _ = select.select([self._sock.fileno()], [], [], 0)
if not ready:
if time_left <= 0:
raise NetBIOSTimeout
else:
time.sleep(CHUNK_TIME)
time_left -= CHUNK_TIME
continue
received = self._sock.recv(bytes_left)
if len(received) == 0:
raise NetBIOSError('Error while reading from remote', ERRCLASS_OS, None)
data = data + received
bytes_left = read_length - len(data)
except select.error as ex:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
raise NetBIOSError('Error occurs while reading from remote', ERRCLASS_OS, ex[0])
return data
def non_polling_read(self, read_length, timeout):
data = b''
bytes_left = read_length
while bytes_left > 0:
try:
ready, _, _ = select.select([self._sock.fileno()], [], [], timeout)
if not ready:
raise NetBIOSTimeout
received = self._sock.recv(bytes_left)
if len(received) == 0:
raise NetBIOSError('Error while reading from remote', ERRCLASS_OS, None)
data = data + received
bytes_left = read_length - len(data)
except select.error as ex:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
raise NetBIOSError('Error occurs while reading from remote', ERRCLASS_OS, ex[0])
return data
def __read(self, timeout = None):
data = self.read_function(4, timeout)
type, flags, length = unpack('>ccH', data)
if ord(type) == NETBIOS_SESSION_MESSAGE:
length |= ord(flags) << 16
else:
if ord(flags) & 0x01:
length |= 0x10000
data2 = self.read_function(length, timeout)
return data + data2
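# Illustrative usage sketch (not part of the original library): a TCP session
# straight to the SMB port, where no NetBIOS session request is needed. Names,
# address and payload below are hypothetical.
#
#   session = NetBIOSTCPSession('MYPC', 'FILESERVER', '192.168.1.10',
#                               sess_port=SMB_SESSION_PORT, timeout=5)
#   session.send_packet(b'...raw SMB payload...')
#   response = session.recv_packet(timeout=5)
#   payload = response.get_trailer()
#   session.close()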
|
the-stack_0_9548 | # -*- coding: utf-8 -*-
"""
Doc serving from Python.
In production there are two modes,
* Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)
* Serving from private symlinks in Python (readthedocs.com only)
In development, we have two modes:
* Serving from public symlinks in Python
* Serving from private symlinks in Python
This means we should only serve from public symlinks in dev,
and generally default to serving from private symlinks in Python only.
Privacy
-------
These views will take into account the version privacy level.
Settings
--------
PYTHON_MEDIA (False) - Set this to True to serve docs & media from Python
SERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import logging
import mimetypes
import os
from functools import wraps
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.static import serve
from readthedocs.builds.models import Version
from readthedocs.core.permissions import AdminPermission
from readthedocs.core.resolver import resolve, resolve_path
from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
from readthedocs.projects import constants
from readthedocs.projects.models import Project, ProjectRelationship
log = logging.getLogger(__name__)
def map_subproject_slug(view_func):
"""
A decorator that maps a ``subproject_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa
if subproject is None and subproject_slug:
try:
subproject = Project.objects.get(slug=subproject_slug)
except Project.DoesNotExist:
try:
# Depends on a project passed into kwargs
rel = ProjectRelationship.objects.get(
parent=kwargs['project'],
alias=subproject_slug,
)
subproject = rel.child
except (ProjectRelationship.DoesNotExist, KeyError):
raise Http404
return view_func(request, subproject=subproject, *args, **kwargs)
return inner_view
def map_project_slug(view_func):
"""
A decorator that maps a ``project_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa
if project is None:
if not project_slug:
project_slug = request.slug
try:
project = Project.objects.get(slug=project_slug)
except Project.DoesNotExist:
raise Http404('Project does not exist.')
return view_func(request, project=project, *args, **kwargs)
return inner_view
@map_project_slug
@map_subproject_slug
def redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument
"""Handle / -> /en/latest/ directs on subdomains."""
return HttpResponseRedirect(resolve(subproject or project))
@map_project_slug
@map_subproject_slug
def redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa
"""Redirect /page/file.html to /en/latest/file.html."""
return HttpResponseRedirect(
resolve(subproject or project, filename=filename))
def _serve_401(request, project):
res = render(request, '401.html')
res.status_code = 401
log.error('Unauthorized access to {0} documentation'.format(project.slug))
return res
def _serve_file(request, filename, basepath):
# Serve the file from the proper location
if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):
# Serve from Python
return serve(request, filename, basepath)
else:
# Serve from Nginx
content_type, encoding = mimetypes.guess_type(
os.path.join(basepath, filename))
content_type = content_type or 'application/octet-stream'
response = HttpResponse(content_type=content_type)
if encoding:
response['Content-Encoding'] = encoding
try:
response['X-Accel-Redirect'] = os.path.join(
basepath[len(settings.SITE_ROOT):],
filename,
)
except UnicodeEncodeError:
raise Http404
return response
@map_project_slug
@map_subproject_slug
def serve_docs(
request, project, subproject, lang_slug=None, version_slug=None,
filename=''):
"""Exists to map existing proj, lang, version, filename views to the file format."""
if not version_slug:
version_slug = project.get_default_version()
try:
version = project.versions.public(request.user).get(slug=version_slug)
except Version.DoesNotExist:
# Properly raise a 404 if the version doesn't exist & a 401 if it does
if project.versions.filter(slug=version_slug).exists():
return _serve_401(request, project)
raise Http404('Version does not exist.')
filename = resolve_path(
subproject or project, # Resolve the subproject if it exists
version_slug=version_slug,
language=lang_slug,
filename=filename,
subdomain=True, # subdomain will make it a "full" path without a URL prefix
)
if (version.privacy_level == constants.PRIVATE and
not AdminPermission.is_member(user=request.user, obj=project)):
return _serve_401(request, project)
return _serve_symlink_docs(
request,
filename=filename,
project=project,
privacy_level=version.privacy_level,
)
@map_project_slug
def _serve_symlink_docs(request, project, privacy_level, filename=''):
"""Serve a file by symlink, or a 404 if not found."""
# Handle indexes
if filename == '' or filename[-1] == '/':
filename += 'index.html'
# This breaks path joining, by ignoring the root when given an "absolute" path
if filename[0] == '/':
filename = filename[1:]
log.info('Serving %s for %s', filename, project)
files_tried = []
serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])
if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa
public_symlink = PublicSymlink(project)
basepath = public_symlink.project_root
if os.path.exists(os.path.join(basepath, filename)):
return _serve_file(request, filename, basepath)
else:
files_tried.append(os.path.join(basepath, filename))
if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa
# Handle private
private_symlink = PrivateSymlink(project)
basepath = private_symlink.project_root
if os.path.exists(os.path.join(basepath, filename)):
return _serve_file(request, filename, basepath)
else:
files_tried.append(os.path.join(basepath, filename))
raise Http404(
'File not found. Tried these files: %s' % ','.join(files_tried))
|
the-stack_0_9549 | import re
def get_message(fault):
match = re.match(r"<class 'Exception'>:([\w\s]+)", fault.faultString)
return 'Server error: "{0}"'.format(match.group(1))
def ask_user(message):
while True:
print(message)
answer = input()
if re.match(r'\w', answer):
return answer
print('Invalid input')
def format_posts(posts):
def format_post(post):
mask = 'Subject: {0}\nCreation: {1}\nTitle: {2}\nBody: {3}'
return mask.format(
post['subject'],
post['creation'],
post['title'],
post['body']
)
return '\n\n'.join(map(format_post, posts))
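if __name__ == '__main__':
    # Illustrative demo (not part of the original module); the post data below
    # is made up.
    sample_posts = [
        {'subject': 'general', 'creation': '2024-01-01',
         'title': 'Hello', 'body': 'First post.'},
        {'subject': 'meta', 'creation': '2024-01-02',
         'title': 'Rules', 'body': 'Be nice.'},
    ]
    print(format_posts(sample_posts))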
|
the-stack_0_9550 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pack relocations in a library (or copy unchanged).
If --enable-packing and --configuration-name=='Release', invoke the
relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given
library files. This step is inserted after the libraries are stripped.
If --enable-packing is zero, the script copies files verbatim, with no
attempt to pack relocations.
Any library listed in --exclude-packing-list is also copied verbatim,
irrespective of any --enable-packing setting. Typically this would be
'libchromium_android_linker.so'.
"""
import optparse
import os
import shutil
import sys
import tempfile
from util import build_utils
def PackLibraryRelocations(android_pack_relocations, library_path, output_path):
shutil.copy(library_path, output_path)
pack_command = [android_pack_relocations, output_path]
build_utils.CheckOutput(pack_command)
def CopyLibraryUnchanged(library_path, output_path):
shutil.copy(library_path, output_path)
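# Illustrative invocation sketch (not part of the original script); all paths
# and the library list format below are hypothetical.
#
#   pack_relocations.py --configuration-name=Release --enable-packing=1 \
#       --exclude-packing-list=libchromium_android_linker.so \
#       --android-pack-relocations=out/Release/relocation_packer \
#       --stripped-libraries-dir=out/Release/lib.stripped \
#       --packed-libraries-dir=out/Release/lib.packed \
#       --libraries='libchrome.so libchromium_android_linker.so'
#   # libchrome.so gets its .rel.dyn/.rela.dyn section packed; the excluded
#   # linker library is copied unchanged.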
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--clear-dir', action='store_true',
help='If set, the destination directory will be deleted '
'before copying files to it. This is highly recommended to '
'ensure that no stale files are left in the directory.')
parser.add_option('--configuration-name',
default='Release',
help='Gyp configuration name (i.e. Debug, Release)')
parser.add_option('--enable-packing',
choices=['0', '1'],
help=('Pack relocations if 1 and configuration name is \'Release\','
' otherwise plain file copy'))
parser.add_option('--exclude-packing-list',
default='',
help='Names of any libraries explicitly not packed')
parser.add_option('--android-pack-relocations',
help='Path to the relocations packer binary')
parser.add_option('--stripped-libraries-dir',
help='Directory for stripped libraries')
parser.add_option('--packed-libraries-dir',
help='Directory for packed libraries')
parser.add_option('--libraries', action='append',
help='List of libraries')
parser.add_option('--stamp', help='Path to touch on success')
parser.add_option('--filelistjson',
help='Output path of filelist.json to write')
options, _ = parser.parse_args(args)
enable_packing = (options.enable_packing == '1' and
options.configuration_name == 'Release')
exclude_packing_set = set(build_utils.ParseGypList(
options.exclude_packing_list))
libraries = []
for libs_arg in options.libraries:
libraries += build_utils.ParseGypList(libs_arg)
if options.clear_dir:
build_utils.DeleteDirectory(options.packed_libraries_dir)
build_utils.MakeDirectory(options.packed_libraries_dir)
output_paths = []
for library in libraries:
library_path = os.path.join(options.stripped_libraries_dir, library)
output_path = os.path.join(
options.packed_libraries_dir, os.path.basename(library))
output_paths.append(output_path)
if enable_packing and library not in exclude_packing_set:
PackLibraryRelocations(options.android_pack_relocations,
library_path,
output_path)
else:
CopyLibraryUnchanged(library_path, output_path)
if options.filelistjson:
build_utils.WriteJson({ 'files': output_paths }, options.filelistjson)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
libraries + build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_0_9551 | # -*- coding: utf-8 -*-
import datetime
from iemlav import logger
import multiprocessing
import netfilterqueue
from iemlav.lib.firewall.packet_filter import PacketFilter
from iemlav.lib.firewall.firewall_monitor import FirewallMonitor
from iemlav.lib.firewall import utils
class FirewallEngine(object):
def __init__(self, cred, debug=False):
"""Initialize FirewallEngine."""
self.cred = cred
self.logger = logger.IemlAVLogger(
__name__,
debug
)
# Parse and setup rules and actions
(self.ip_inbound,
self.action_inbound_IPRule) = self.parse_inbound_IPRule()
(self.ip_outbound,
self.action_outbound_IPRule) = self.parse_outbound_IPRule()
(self.protocols,
self.action_protocolRule) = self.parse_protocolRule()
(self.sports,
self.action_source_portRule) = self.parse_source_portRule()
(self.dports,
self.action_dest_portRule) = self.parse_dest_portRule()
(self.dns,
self.action_DNSRule) = self.parse_DNSRule()
(self.extensions,
self.action_scanLoad) = self.parse_scanLoad()
self.action_HTTPRequest = self.parse_HTTPRequest()
self.action_HTTPResponse = self.parse_HTTPResponse()
# Interface
self.interface = str(self.cred['interface'])
if self.interface == "":
self.interface = utils.get_interface()
# Setup PacketFilter object
self.packetFilterObj = PacketFilter(interface=self.interface,
debug=debug,
ip_inbound=self.ip_inbound,
ip_outbound=self.ip_outbound,
protocols=self.protocols,
dns=self.dns,
dports=self.dports,
sports=self.sports,
extensions=self.extensions,
action_inbound_IPRule=self.action_inbound_IPRule,
action_outbound_IPRule=self.action_outbound_IPRule,
action_DNSRule=self.action_DNSRule,
action_source_portRule=self.action_source_portRule,
action_dest_portRule=self.action_dest_portRule,
action_HTTPResponse=self.action_HTTPResponse,
action_HTTPRequest=self.action_HTTPRequest,
action_protocolRule=self.action_protocolRule,
action_scanLoad=self.action_scanLoad)
        # Setup Monitor object
self.monitorObj = FirewallMonitor(interface=self.interface,
debug=debug)
# Integrations
self.integrations = ['Firewall',
'Monitor']
    def restore_state(self):
resp = utils.excecute_command('iptables --flush')
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
def parse_inbound_IPRule(self):
try:
action = int(self.cred['inbound_IPRule']['action'])
temp_ip_inbound = []
if len(self.cred['inbound_IPRule']['ip_inbound']):
list_of_IPs = str(self.cred['inbound_IPRule']['ip_inbound'])
list_of_IPs = list_of_IPs.split(',')
for IP in list_of_IPs:
if '-' in IP:
for new_ip in utils.generate_IPs(IP):
if (new_ip not in temp_ip_inbound and
utils.check_ip(new_ip)):
temp_ip_inbound.append(str(new_ip).strip())
elif (utils.check_ip(IP)):
if IP not in temp_ip_inbound:
temp_ip_inbound.append(str(IP).strip())
return temp_ip_inbound, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_outbound_IPRule(self):
try:
action = int(self.cred['outbound_IPRule']['action'])
temp_ip_outbound = []
if len(self.cred['outbound_IPRule']['ip_outbound']):
list_of_IPs = str(self.cred['outbound_IPRule']['ip_outbound'])
list_of_IPs = list_of_IPs.split(',')
for IP in list_of_IPs:
if '-' in IP:
for new_ip in utils.generate_IPs(IP):
if (new_ip not in temp_ip_outbound and
utils.check_ip(new_ip)):
temp_ip_outbound.append(str(new_ip).strip())
elif (utils.check_ip(IP)):
if IP not in temp_ip_outbound:
temp_ip_outbound.append(str(IP).strip())
return temp_ip_outbound, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_protocolRule(self):
try:
temp_protocol = []
action = int(self.cred['protocolRule']['action'])
if len(self.cred['protocolRule']['protocols']):
protocols = str(self.cred['protocolRule']['protocols'])
protocols = protocols.split(',')
protocols = map(utils.map_protocol, protocols)
protocols = list(protocols)
for protocol in protocols:
if (protocol and
protocol not in temp_protocol):
temp_protocol.append(protocol)
return temp_protocol, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_DNSRule(self):
try:
temp_DNS = []
action = int(self.cred['DNSRule']['action'])
if len(self.cred['DNSRule']['dns']):
dns = str(self.cred['DNSRule']['dns'])
dns = dns.split(',')
for single_dns in dns:
if single_dns not in temp_DNS:
temp_DNS.append(str(single_dns).strip())
return temp_DNS, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_source_portRule(self):
try:
temp_sports = []
action = int(self.cred['source_portRule']['action'])
if len(self.cred['source_portRule']['sports']):
sports = str(self.cred['source_portRule']['sports'])
sports = sports.split(',')
for port in sports:
if '-' in port:
for new_port in utils.generate_ports(port):
if (new_port not in temp_sports and
utils.check_port(new_port)):
temp_sports.append(str(new_port).strip())
elif utils.check_port(port):
if port not in temp_sports:
temp_sports.append(str(port).strip())
return temp_sports, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_dest_portRule(self):
try:
temp_dports = []
action = int(self.cred['dest_portRule']['action'])
if len(self.cred['dest_portRule']['dports']):
dports = str(self.cred['dest_portRule']['dports'])
dports = dports.split(',')
for port in dports:
if '-' in port:
for new_port in utils.generate_ports(port):
if (new_port not in temp_dports and
utils.check_port(new_port)):
temp_dports.append(str(new_port).strip())
elif utils.check_port(port):
if port not in temp_dports:
temp_dports.append(str(port).strip())
return temp_dports, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_HTTPResponse(self):
"""
Parse HTTPResponse configurations.
Args:
None
Raises:
None
Returns:
action (int): 0 or 1
"""
try:
action = int(self.cred['HTTPResponse']['action'])
return action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Allow HTTPResponse
return 1
def parse_HTTPRequest(self):
"""
Parse HTTPRequest configurations.
Args:
None
Raises:
None
Returns:
action (int): 0 or 1
"""
try:
action = int(self.cred['HTTPRequest']['action'])
return action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Allow HTTPRequest
return 1
def parse_scanLoad(self):
try:
temp_extension = []
action = int(self.cred['scanLoad']['action'])
if len(self.cred['scanLoad']['extensions']):
extensions = str(self.cred['scanLoad']['extensions'])
extensions = extensions.split(',')
for extension in extensions:
if extension not in temp_extension:
temp_extension.append(str(extension).strip())
return temp_extension, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_time(self):
try:
current_time = datetime.datetime.now()
time_lb = self.cred['time']['time_lb']
time_ub = self.cred['time']['time_ub']
datetime_lb = current_time.replace(hour=int((time_lb).split(':')[0]),
minute=int((time_lb).split(':')[1]))
datetime_ub = current_time.replace(hour=int((time_ub).split(':')[0]),
minute=int((time_ub).split(':')[1]))
if (current_time > datetime_lb and
current_time < datetime_ub):
return True
else:
return False
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
def process_packet(self, pkt):
if (self.packetFilterObj.process(pkt) and
                self.parse_time()):
pkt.accept()
else:
pkt.drop()
def startFirewall(self):
input_command = 'iptables -I INPUT -j NFQUEUE --queue-num 0'
output_command = 'iptables -I OUTPUT -j NFQUEUE --queue-num 0'
resp = utils.excecute_command(input_command)
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
resp = utils.excecute_command(output_command)
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
try:
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, self.process_packet)
queue.run()
except KeyboardInterrupt:
# Restore iptables state
self.restore_state()
def startMonitor(self):
self.monitorObj.startMonitoring()
def startEngine(self):
processes = []
firewallProcess = multiprocessing.Process(target=self.startFirewall)
monitorProcess = multiprocessing.Process(target=self.startMonitor)
firewallProcess.start()
monitorProcess.start()
processes.append(firewallProcess)
processes.append(monitorProcess)
self.logger.log(
"Integrations: " + str(self.integrations),
logtype="info"
)
for process in processes:
process.join()
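# --- Added usage sketch (not part of the original module) ---
# Shows how FirewallEngine might be driven from a parsed config dict; the
# structure of `cred` below is inferred from the parse_* methods above and
# is an assumption, not a documented format. Running it needs root and the
# iemlav/netfilterqueue dependencies, so the engine start is left commented.
if __name__ == '__main__':
    cred = {
        'interface': '',
        'inbound_IPRule': {'action': 0, 'ip_inbound': '10.0.0.1-10.0.0.5'},
        'outbound_IPRule': {'action': 0, 'ip_outbound': ''},
        'protocolRule': {'action': 0, 'protocols': 'ICMP'},
        'DNSRule': {'action': 0, 'dns': 'example.com'},
        'source_portRule': {'action': 0, 'sports': ''},
        'dest_portRule': {'action': 0, 'dports': '4000-4100'},
        'HTTPRequest': {'action': 1},
        'HTTPResponse': {'action': 1},
        'scanLoad': {'action': 0, 'extensions': '.exe'},
        'time': {'time_lb': '00:00', 'time_ub': '23:59'},
    }
    engine = FirewallEngine(cred, debug=True)
    # engine.startEngine()  # uncomment to bind NFQUEUE and start monitoring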
|
the-stack_0_9553 | from collections import defaultdict, Counter, deque
from functools import cache
from itertools import product, pairwise
from multiprocessing import Pool
import math
import re
non_digits = re.compile('[^0-9]+')
def sign(a, b, step=1):
return int(math.copysign(step, b-a))
def autorange(a,b, step=1):
    if a == b: return (a,)
s = sign(a, b, step)
return range(a, b+s, s)
def get_ints(line, strip_line=False):
if strip_line:
line = line.strip()
return [*map(int, non_digits.split(line))]
grid_char = {'.': '.', (0,1): 'v', (1,0):'>'}
def d25(inp, sample=False):
p1, p2 = None, None
grid = {}
max_x, max_y = 0, 0
for y, line in enumerate(inp.split()):
max_y = max(y+1, max_y)
for x, char in enumerate(line):
max_x = max(x+1, max_x)
if char == '>':
grid[x,y] = (1,0)
elif char == 'v':
grid[x,y] = (0,1)
turn = 0
moved = True
n_grid = {}
while moved:
# if turn in (0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 55, 56, 57, 58):
# print(f"After {turn} steps:")
# for y in range(max_y):
# for x in range(max_x):
# print(grid_char[grid.get((x,y), '.')], end='')
# print()
turn += 1
moved = False
for (x,y), (dx, dy) in grid.items():
if dy:
n_grid[x,y] = grid[x,y]
continue
nt = nx, ny = (x+dx)%max_x, (y+dy)%max_y
if grid.get(nt, None) is None:
n_grid[nt] = dx,dy
moved = True
else:
n_grid[x,y] = dx,dy
grid = n_grid
n_grid = {}
for (x,y), (dx, dy) in grid.items():
if dx:
n_grid[x,y] = grid[x,y]
continue
nt = nx, ny = (x+dx)%max_x, (y+dy)%max_y
if grid.get(nt, None) is None:
n_grid[nt] = dx,dy
moved = True
else:
n_grid[x,y] = dx,dy
grid = n_grid
n_grid = {}
p1 = turn
return p1, p2
def validate_test(case_id, inp=None, want_p1=None, want_p2=None):
do_p1, do_p2 = False, False
#print(f"validate_test({case_id}, {inp}, {want_p1}, {want_p2})")
got_p1, got_p2 = d25(inp, sample=True)
if want_p1 is not None:
assert want_p1 == got_p1, f"{case_id=} p1:\n\t{want_p1=}\n\t{got_p1=}"
do_p1 = True
if want_p2 is not None:
assert want_p2 == got_p2, f"{case_id=} p2:\n\t{want_p2=}\n\t{got_p2=}"
do_p2 = True
return True, do_p1, do_p2
def main():
with open('../inputs/d25.txt') as f:
inp = f.read().strip()
return d25(inp)
if __name__ == '__main__':
cases = [
#(id, inp, p1, p2),
(0, """v...>>.vv>
.vv>>.vv..
>>.>v>...v
>>v>>.>.v.
v>v.vv.v..
>.>>..v...
.vv..>.>v.
v.v..>>v.v
....v..v.>""", 58, None),
]
"""
# Non multiprocessing version
for case in cases:
validate_test(*case)
p1, p2 = main()
print(f"p1 = {p1}\np2 = {p2}")
"""
with Pool(processes=min(8, len(cases) + 1)) as pool:
main_res = pool.apply_async(main)
test_res = [pool.apply_async(validate_test, case) for case in cases]
test_pass, do_p1, do_p2 = True, False, False
for test in test_res:
tp, dp1, dp2 = test.get(30)
test_pass &= tp
do_p1 |= dp1
do_p2 |= dp2
if test_pass:
p1, p2 = main_res.get(60)
        assert do_p1 or do_p2, "Didn't run any tests"
assert p1 is None or do_p1 == True, "Got P1 value without 'do_p1' set"
assert p2 is None or do_p2 == True, "Got P2 value without 'do_p2' set"
print(f"p1 = {p1}\np2 = {p2}")
|
the-stack_0_9554 | import numpy as np
import matplotlib.pyplot as plt
from math import ceil
from random import randint
import pickle,glob, cv2,os
def get_input_shape(dataPath, segmentationScheme):
imgLoc = "{}/segmentation/{}/training/images/".format(dataPath,segmentationScheme)
labelLoc = "{}/segmentation/{}/labels.pickle".format(dataPath,segmentationScheme)
with open(labelLoc, 'rb') as f:
data = pickle.load(f)
data = data['training']
imageName = data[0]
img = cv2.imread("{}{}.jpg".format(imgLoc,imageName))
return img.shape
def read_n_images(data, start, end, dataPath):
"""
Read images (should be jpg) from a dataset (from indexes start to end).
:param data: list - image names
:param start: int - start index
:param end: int - end index
:param loc: str - directory location of the images
:return: numpy - numpy array of (BGR) image
"""
assert glob.glob(dataPath), "Check directory."
assert glob.glob("{}/*.jpg".format(dataPath)), "Check file extension (should be 'jpg')."
images_list = data[start:end]
images = [cv2.imread("{}/{}.jpg".format(dataPath, image)) for image in images_list]
return np.array(images)
def generate_image_segmentation_labels(method,segmentationScheme ,batchSize, dataDir='', squashOutput=True):
imagePath = "{}/segmentation/{}/{}/images".format(dataDir,segmentationScheme,method)
segmentsPath = "{}/segmentation/{}/{}/labels".format(dataDir,segmentationScheme,method)
labelPath = "{}/segmentation/{}/labels.pickle".format(dataDir,segmentationScheme)
while True:
with open(labelPath, 'rb') as f:
data = pickle.load(f)
methods = list(data.keys())
assert method in methods, "'{}' not a valid mode (must be one of {})".format(method, str(methods))
data = data[method]
for idx in range(0, len(data), batchSize):
start = idx
end = idx + batchSize
images = read_n_images(data, start, end, imagePath)
segmentations = read_n_images(data, start, end, segmentsPath)
if squashOutput == True:
segmentations = segmentations[:,:,:,0]+segmentations[:,:,:,1]+segmentations[:,:,:,2]
sShape = segmentations.shape
segmentations = segmentations.reshape((sShape[0],sShape[1],sShape[2],1))
yield (images / 255, segmentations / 255)
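# --- Added usage sketch (not part of the original module) ---
# Pulls one (images, labels) batch from the generator above; the data
# directory and segmentation scheme names are placeholders, not values
# shipped with this code.
if __name__ == '__main__':
    batch_gen = generate_image_segmentation_labels(
        method='training',
        segmentationScheme='binary',
        batchSize=4,
        dataDir='data',
    )
    images, labels = next(batch_gen)
    print(images.shape, labels.shape)  # e.g. (4, H, W, 3) and (4, H, W, 1)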
def get_num_images(method, segmentationScheme, dataDir):
labelPath = "{}/segmentation/{}/labels.pickle".format(dataDir, segmentationScheme)
with open(labelPath, 'rb') as f:
data = pickle.load(f)
data = data[method]
return len(data) |
the-stack_0_9556 | import GCRCatalogs
from GCRCatalogs import GCRQuery
import pandas as pd
import numpy as np
# We load the catalog with addons
cat = GCRCatalogs.load_catalog('dc2_object_run2.2i_dr6_with_addons')
columns_to_get0 = ["objectId", "Ixx_pixel", "Iyy_pixel", "Ixy_pixel", "IxxPSF_pixel", "IyyPSF_pixel", 'IxyPSF_pixel']
#columns_to_get0 = ["objectId"]
columns_to_get2 = ["match_objectId", "cosmodc2_id_truth"]
DF0 = cat.catalogs[0].get_quantities(columns_to_get0)
DF0 = pd.DataFrame(DF0)
print(DF0.head())
DF2 = cat.catalogs[2].get_quantities(columns_to_get2)
DF2 = pd.DataFrame(DF2)
print(DF2.head())
# rename match_objectid in DF2
DF2.rename(columns={"match_objectId":"objectId"}, inplace=True)
DF_merged = pd.merge(DF0, DF2, on=["objectId"])
print(DF_merged.head())
DF_merged.rename(columns={"cosmodc2_id_truth":"cosmoDC2_ID"}, inplace=True)
#get a sense for the ranges of IDs
print(np.sort(DF_merged.loc[DF_merged['cosmoDC2_ID'] > 0, 'cosmoDC2_ID']))
print("Number of nans in Ixx_pixel: ", np.sum(np.isnan(DF_merged['Ixx_pixel'])))
DF_merged = DF_merged.loc[np.logical_not(np.isnan(DF_merged['Ixx_pixel']))] # remove the nans
DF_merged['RSQ_pixel_gal'] = (DF_merged['Ixx_pixel']+DF_merged['Iyy_pixel']) - (DF_merged['IxxPSF_pixel']+DF_merged['IyyPSF_pixel'])
print("saving file.")
print(np.sort(DF_merged.loc[DF_merged['cosmoDC2_ID'] > 0, 'cosmoDC2_ID']))
DF_merged.to_csv("/global/cscratch1/sd/mlokken/sn_hostenv/FullImageMomentsCatalog.tar.gz")
print("Done.") |
the-stack_0_9557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Bruce Smith <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: nictagadm
short_description: Manage nic tags on SmartOS systems
description:
- Create or delete nic tags on SmartOS systems.
version_added: '2.8'
author:
- Bruce Smith (@SmithX10)
options:
name:
description:
- Name of the nic tag.
required: true
type: str
mac:
description:
- Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
- Parameters I(mac) and I(etherstub) are mutually exclusive.
type: str
etherstub:
description:
- Specifies that the nic tag will be attached to a created I(etherstub).
- Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
type: bool
default: no
mtu:
description:
- Specifies the size of the I(mtu) of the desired nic tag.
- Parameters I(mtu) and I(etherstub) are mutually exclusive.
type: int
force:
description:
- When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
type: bool
default: no
state:
description:
- Create or delete a SmartOS nic tag.
type: str
choices: [ absent, present ]
default: present
'''
EXAMPLES = r'''
- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
nictagadm:
name: storage0
mac: 00:1b:21:a3:f5:4d
mtu: 9000
state: present
- name: Remove 'storage0' nic tag
nictagadm:
name: storage0
state: absent
'''
RETURN = r'''
name:
description: nic tag name
returned: always
type: str
sample: storage0
mac:
description: MAC Address that the nic tag was attached to.
returned: always
type: str
sample: 00:1b:21:a3:f5:4d
etherstub:
description: specifies if the nic tag will create and attach to an etherstub.
returned: always
type: bool
sample: False
mtu:
description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive.
returned: always
type: int
sample: 1500
force:
description: Shows if -f was used during the deletion of a nic tag
returned: always
type: bool
sample: False
state:
description: state of the target
returned: always
type: str
sample: present
'''
from ansible.module_utils.basic import AnsibleModule
import re
class NicTag(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.mac = module.params['mac']
self.etherstub = module.params['etherstub']
self.mtu = module.params['mtu']
self.force = module.params['force']
self.state = module.params['state']
self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
def is_valid_mac(self):
if re.match("[0-9a-f]{2}([:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", self.mac.lower()):
return True
return False
def nictag_exists(self):
cmd = [self.nictagadm_bin]
cmd.append('exists')
cmd.append(self.name)
(rc, dummy, dummy) = self.module.run_command(cmd)
return rc == 0
def add_nictag(self):
cmd = [self.nictagadm_bin]
cmd.append('-v')
cmd.append('add')
if self.etherstub:
cmd.append('-l')
if self.mtu:
cmd.append('-p')
cmd.append('mtu=' + str(self.mtu))
if self.mac:
cmd.append('-p')
cmd.append('mac=' + str(self.mac))
cmd.append(self.name)
return self.module.run_command(cmd)
def delete_nictag(self):
cmd = [self.nictagadm_bin]
cmd.append('-v')
cmd.append('delete')
if self.force:
cmd.append('-f')
cmd.append(self.name)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
mac=dict(type='str'),
etherstub=dict(type='bool', default=False),
mtu=dict(type='int'),
force=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
mutually_exclusive=[
['etherstub', 'mac'],
['etherstub', 'mtu'],
],
required_if=[
['etherstub', False, ['name', 'mac']],
['state', 'absent', ['name', 'force']],
],
supports_check_mode=True
)
nictag = NicTag(module)
rc = None
out = ''
err = ''
result = dict(
changed=False,
etherstub=nictag.etherstub,
force=nictag.force,
name=nictag.name,
mac=nictag.mac,
mtu=nictag.mtu,
state=nictag.state,
)
    if nictag.mac and not nictag.is_valid_mac():
module.fail_json(msg='Invalid MAC Address Value',
name=nictag.name,
mac=nictag.mac,
etherstub=nictag.etherstub)
if nictag.state == 'absent':
if nictag.nictag_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nictag.delete_nictag()
if rc != 0:
module.fail_json(name=nictag.name, msg=err, rc=rc)
elif nictag.state == 'present':
if not nictag.nictag_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nictag.add_nictag()
if rc is not None and rc != 0:
module.fail_json(name=nictag.name, msg=err, rc=rc)
if rc is not None:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
the-stack_0_9564 | from web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider
import sys
sys.path.insert(0,"W3CR")
from writer import writer
from reader import reader
import hashlib
import json
import time
import random
import string
from datetime import datetime
class W3CR():
Writer = None
Reader = None
ToReveal = None
Address = None
Password = None
lastTxHash = None
def Commit(self, _data):
key = "".join(random.choices(string.ascii_uppercase, k=9))
dataToCommit = {
"data" : _data,
"key": key,
}
dataBytes = json.dumps(dataToCommit)
self.ToReveal = dataBytes
Hash = hashlib.sha256(dataBytes.encode()).hexdigest()
try:
result = self.Writer.write(Hash)
print("--COMMIT--")
self.lastTxHash = result
except Exception as e:
result = None
print ("--WARNING: Failed To Commit--")
print(e)
print ("--TRY AGAIN--")
time.sleep(60)
self.Commit(_data)
return result
def Reveal(self):
toReveal = {
"payload": json.loads(self.ToReveal),
"commitHash": self.lastTxHash,
}
dataBytes = json.dumps(toReveal)
try:
self.Writer.write(dataBytes)
print("--REVEAL--")
        except Exception as e:
print ("--WARNING: Failed To Reveal--")
print(e)
print ("--TRY AGAIN--")
time.sleep(60)
self.Reveal()
def CheckAndGetData(self, _r):
try:
Tx = self.Reader.read(_r)
if Tx != None:
Proven = False
if len(Tx["input"]) >135:
revealInput = json.loads(self.w3.toText(Tx["input"]))
commitTx = self.w3.eth.getTransaction(revealInput["commitHash"])
if self.w3.toText(commitTx["input"]) == hashlib.sha256(json.dumps(revealInput["payload"]).encode()).hexdigest():
Proven = True
ToReturn = {
"msg" : revealInput["payload"],
"wasCommited" : Proven,
"commitmentBlock" : commitTx["blockNumber"] ,
"txHash" : revealInput["commitHash"]
}
return ToReturn
else:
return "commitTX"
else:
return None
except Exception as e:
print("WARNING: Failed to Read--")
print(e)
return None
def __init__(self, _adr, _psw, _w3, _firstRev):
self.Writer = writer(_psw, _adr, _w3)
self.w3 = _w3
self.Reader = reader(_adr, _w3)
self.Password = _psw
self.Address = _adr
self.ToReveal = json.dumps(_firstRev)
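# --- Added usage sketch (not part of the original module) ---
# One commit/reveal cycle against a local Ethereum node; the provider URL,
# account address, password and payloads are placeholders (assumptions).
if __name__ == '__main__':
    w3 = Web3(HTTPProvider('http://127.0.0.1:8545'))
    notary = W3CR('0xYourAccountAddress', 'your-password', w3,
                  {'note': 'first reveal payload'})
    tx_hash = notary.Commit({'doc': 'example document fingerprint'})
    if tx_hash is not None:
        notary.Reveal()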
|
the-stack_0_9565 | import datetime
import hashlib
from urllib.parse import urlparse
import json
class Blockchain():
def __init__(self):
self.chain = []
self.nodes = set()
self.create_block(proof = 1, previous_hash = '0')
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'previous_hash': previous_hash,
'proof': proof
}
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while not check_proof:
            problem = new_proof**2 - previous_proof**2 #problem should not be symmetric
hash_operation = hashlib.sha256(str(problem).encode()).hexdigest()
if hash_operation[:4] == '0000':
                #more leading zeros means a smaller target, making the proof of work harder to find
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_valid_chain(self, chain):
previous_block = chain[0]
previous_index = 1
        while previous_index < len(chain):
#check if previous_hash is equal to previous block hash
block = chain[previous_index]
if block['previous_hash'] != self.hash(previous_block):
return False
#check if hash_operation of previous_proof and current_proof is below the target
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
#update
previous_block = block
previous_index += 1
return True
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
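# --- Added usage sketch (not part of the original module) ---
# Mines one block on top of the genesis block and validates the chain;
# the variable names here are illustrative only.
if __name__ == '__main__':
    blockchain = Blockchain()
    previous_block = blockchain.get_previous_block()
    proof = blockchain.proof_of_work(previous_block['proof'])
    previous_hash = blockchain.hash(previous_block)
    blockchain.create_block(proof, previous_hash)
    print(blockchain.is_valid_chain(blockchain.chain))  # expected: True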
|
the-stack_0_9566 | #!/usr/bin/env python
from nose.tools import *
import cynetworkx as nx
class TestFloyd:
def setUp(self):
pass
def test_floyd_warshall_predecessor_and_distance(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
assert_equal(dist['s']['v'], 9)
assert_equal(path['s']['v'], 'u')
assert_equal(dist,
{'y': {'y': 0, 'x': 12, 's': 7, 'u': 15, 'v': 6},
'x': {'y': 2, 'x': 0, 's': 9, 'u': 3, 'v': 4},
's': {'y': 7, 'x': 5, 's': 0, 'u': 8, 'v': 9},
'u': {'y': 2, 'x': 2, 's': 9, 'u': 0, 'v': 1},
'v': {'y': 1, 'x': 13, 's': 8, 'u': 16, 'v': 0}})
GG = XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
assert_equal(dist['s']['v'], 8)
# skip this test, could be alternate path s-u-v
# assert_equal(path['s']['v'],'y')
G = nx.DiGraph() # no weights
G.add_edges_from([('s', 'u'), ('s', 'x'),
('u', 'v'), ('u', 'x'),
('v', 'y'), ('x', 'u'),
('x', 'v'), ('x', 'y'),
('y', 's'), ('y', 'v')])
path, dist = nx.floyd_warshall_predecessor_and_distance(G)
assert_equal(dist['s']['v'], 2)
# skip this test, could be alternate path s-u-v
# assert_equal(path['s']['v'],'x')
# alternate interface
dist = nx.floyd_warshall(G)
assert_equal(dist['s']['v'], 2)
@raises(KeyError)
def test_reconstruct_path(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
predecessors, _ = nx.floyd_warshall_predecessor_and_distance(XG)
path = nx.reconstruct_path('s', 'v', predecessors)
assert_equal(path, ['s', 'x', 'u', 'v'])
path = nx.reconstruct_path('s', 's', predecessors)
assert_equal(path, [])
# this part raises the keyError
nx.reconstruct_path('1', '2', predecessors)
def test_cycle(self):
path, dist = nx.floyd_warshall_predecessor_and_distance(
nx.cycle_graph(7))
assert_equal(dist[0][3], 3)
assert_equal(path[0][3], 2)
assert_equal(dist[0][4], 3)
def test_weighted(self):
XG3 = nx.Graph()
XG3.add_weighted_edges_from([[0, 1, 2], [1, 2, 12], [2, 3, 1],
[3, 4, 5], [4, 5, 1], [5, 0, 10]])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG3)
assert_equal(dist[0][3], 15)
assert_equal(path[0][3], 2)
def test_weighted2(self):
XG4 = nx.Graph()
XG4.add_weighted_edges_from([[0, 1, 2], [1, 2, 2], [2, 3, 1],
[3, 4, 1], [4, 5, 1], [5, 6, 1],
[6, 7, 1], [7, 0, 1]])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG4)
assert_equal(dist[0][2], 4)
assert_equal(path[0][2], 1)
def test_weight_parameter(self):
XG4 = nx.Graph()
XG4.add_edges_from([(0, 1, {'heavy': 2}), (1, 2, {'heavy': 2}),
(2, 3, {'heavy': 1}), (3, 4, {'heavy': 1}),
(4, 5, {'heavy': 1}), (5, 6, {'heavy': 1}),
(6, 7, {'heavy': 1}), (7, 0, {'heavy': 1})])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG4,
weight='heavy')
assert_equal(dist[0][2], 4)
assert_equal(path[0][2], 1)
def test_zero_distance(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
for u in XG:
assert_equal(dist[u][u], 0)
GG = XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
for u in GG:
            assert_equal(dist[u][u], 0)
def test_zero_weight(self):
G = nx.DiGraph()
edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1),
(5, 4, 0), (4, 3, -5), (2, 5, -7)]
G.add_weighted_edges_from(edges)
dist = nx.floyd_warshall(G)
assert_equal(dist[1][3], -14)
G = nx.MultiDiGraph()
edges.append((2, 5, -7))
G.add_weighted_edges_from(edges)
dist = nx.floyd_warshall(G)
assert_equal(dist[1][3], -14)
|
the-stack_0_9567 | import os, sys, pygame
from pygame.locals import *
from Pellet import Pellet
from Pacman import Pacman
from Box import Box
# Initialize Pygame
pygame.init()
# Initialize Clock
mainClock = pygame.time.Clock()
# Constants
WINDOWWIDTH = 448 #(16 * 28) (row numbers range from 0 - 27)
WINDOWHEIGHT = 512 #(16 * 32) (column numbers range from 0 - 31)
LIVES = 3
# Initialize window
window = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
# Initialize colours
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# Set background
background = pygame.image.load('../../sprites/pacman-level.png')
window.blit(background, (0, 0))
# Initialize movement variables
moveLeft = False
moveRight = False
moveDown = False
moveUp = False
# Pixels per loop
MOVESPEED = 16
# Boxes (for collision purposes)
# To create a Box object: Box(x, y, COLOR)
box_group = pygame.sprite.Group()
# Grid (for movement)
# Uses Box objects
grid_group = pygame.sprite.Group()
# Teleporters
l_transporter = pygame.sprite.GroupSingle(Box(0, 16 * 15, BLUE))
r_transporter = pygame.sprite.GroupSingle(Box(16 * 27, 16 * 15, BLUE))
# Goes through the entire map and outlines which 16x16 areas are black
# This identifies where Pacman and Pellets can and cannot go
x = 0
y = 16
while y < WINDOWHEIGHT:
while x < WINDOWWIDTH:
# 16x16 area used for cropping
selected_area = pygame.Rect(x, y, 16, 16)
# Creates a cropped image from the background
cropped_image = background.subsurface(selected_area)
# If the cropped image's color is BLACK
if pygame.transform.average_color(cropped_image)[:3] == BLACK:
grid_member = Box(x, y, GREEN)
grid_member.check_possible_moves(x, y)
grid_group.add(grid_member)
else:
box_group.add(Box(x, y, RED))
x += 16
y += 16
x = 0
# Initialize Pacman
pacman = Pacman(224, 384, MOVESPEED, box_group) # 16 * 14, 16 * 24
pacman_group = pygame.sprite.GroupSingle(pacman)
# Initialize movement variable
movement = 'R'
last_movement = 'R'
# Draw Pacman onto the window
pacman_group.draw(window)
# Update display
pygame.display.update()
def update_window():
"""Updates the window by redrawing the background and sprites"""
# Redraw the background and sprites
window.blit(background, (0, 0))
# box_group.draw(window)
# grid_group.draw(window)
pacman_group.draw(window)
# Update the display
pygame.display.update()
mainClock.tick(10)
def transport_right(sprite):
"""Transports sprite from the right side of the window to the left side"""
while sprite.rect.left <= WINDOWWIDTH:
sprite.rect.right += 2
update_window()
sprite.rect.right = 0
while sprite.rect.left <= 0:
sprite.rect.right += 2
update_window()
sprite.rect = pygame.Rect(16 * 1, 16 * 15, 16, 16)
def transport_left(sprite):
"""Transports sprite from the left side of the window to the right side"""
while sprite.rect.right >= 0:
sprite.rect.left -= 2
update_window()
sprite.rect.left = WINDOWWIDTH
while sprite.rect.right >= WINDOWWIDTH:
sprite.rect.left -= 2
update_window()
sprite.rect = pygame.Rect(16 * 26, 16 * 15, 16, 16)
# Main loop
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_UP:
movement = 'U'
if event.key == K_DOWN:
movement = 'D'
if event.key == K_LEFT:
movement = 'L'
if event.key == K_RIGHT:
movement = 'R'
current_grid_location = pygame.sprite.spritecollide(pacman, grid_group, False)
grid_member = current_grid_location.pop()
if movement in grid_member.valid_moves:
# Updates Pacman's movement
pacman_group.update(movement)
last_movement = movement
else:
if last_movement in grid_member.valid_moves:
pacman_group.update(last_movement)
# Transport Pacman if Pacman collides with either transporter
if pygame.sprite.spritecollide(pacman, l_transporter, False):
transport_left(pacman)
elif pygame.sprite.spritecollide(pacman, r_transporter, False):
transport_right(pacman)
# Update game
update_window() |
the-stack_0_9570 | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for resetting an instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
DETAILED_HELP = {
'brief': 'Reset a virtual machine instance.',
'DESCRIPTION':
"""\
*{command}* is used to perform a hard reset on a Google
Compute Engine virtual machine.
This will not perform a clean shutdown of the guest OS on the instance.
""",
'EXAMPLES':
"""\
To reset an instance named ``test-instance'', run:
$ {command} test-instance
"""
}
class Reset(base.SilentCommand):
"""Reset a virtual machine instance."""
@staticmethod
def Args(parser):
flags.INSTANCES_ARG.AddArgument(parser)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
instance_refs = flags.INSTANCES_ARG.ResolveAsResource(
args, holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(client))
request_list = []
for instance_ref in instance_refs:
request = client.messages.ComputeInstancesResetRequest(
instance=instance_ref.Name(),
project=instance_ref.project,
zone=instance_ref.zone)
request_list.append((client.apitools_client.instances, 'Reset', request))
return client.MakeRequests(request_list)
Reset.detailed_help = DETAILED_HELP
|
the-stack_0_9571 | import requests
import argparse
import sys
import os
import bs4
import csv
import re
import enum
from datetime import datetime
class LogType(enum.Enum):
crime = "crime"
noncrime = "noncrime"
def _process_html(string, output_writer, log_type):
soup = bs4.BeautifulSoup(string, features="lxml")
table = soup.find(class_="articletext")
header_row = table.find(class_="White")
data1_list = []
data2_list = []
# For some reason each table is formatted slightly differently, so we have to account for that.
if log_type == LogType.crime:
data1_list = [data1 for data1 in table.find_all("tr") if len(data1.attrs) == 1 and "bgcolor" in data1.attrs]
data2_list = [data2 for data2 in table.find_all("tr", class_="body")]
elif log_type == LogType.noncrime:
data1_list = [data1 for data1 in table.find_all("tr") if len(data1.attrs) == 2 and "id" in data1.attrs and "row" in data1.attrs["id"]]
data2_list = [data2 for data2 in table.find_all("tr", class_="body")]
for data1, data2 in zip(data1_list, data2_list):
row = [re.sub(r'[\r\n\t]', '', value.text) for value in data1.find_all("td", limit=5)]
# Format the date column into year-first format.
try:
row[1] = datetime.strptime(row[1], "%m/%d/%Y").strftime("%y/%m/%d")
except ValueError:
pass
data2str = data2.find("td").text
location_str = re.search(r'Location:(.*)\n', data2str).group(1)
nature_str = re.search(r'Nature:(.*)\n', data2str).group(1)
row.append(re.sub(r'[\r\n\t]', '', location_str).strip())
row.append(re.sub(r'[\r\n\t]', '', nature_str).strip())
output_writer.writerow(row)
total_entry_count = table.find("span", style="font-weight: bold", string="of").parent.text.split()[-1]
return int(total_entry_count)
def scrape(scrape_output_dir="./scraped",
starting_offset=0,
max_entries=0,
max_retries=0,
local_files_path=None):
out_dir = os.path.join(scrape_output_dir, str(datetime.now())) # the unique output dir we'll use
os.makedirs(out_dir)
process_chunk = 100 # how many entries (we think) are getting retrieved per request
retry_count = 0
for log_type in LogType:
total_entries = max_entries
entries_processed = starting_offset
with open(os.path.join(out_dir, 'scrape-result-{}.csv'.format(log_type.value)), 'w') as output_file:
output_writer = csv.writer(output_file)
output_writer.writerow(["Case #", "Date Reported", "Occurrence Interval", "Disposition", "Status", "Location", "Nature"])
if local_files_path is None:
while total_entries == 0 or entries_processed < total_entries:
print("Processing entries {} through {}".format(entries_processed, entries_processed + process_chunk))
params = {"offset": str(entries_processed)}
try:
result = requests.get("http://police.gatech.edu/{}log.php".format(log_type.value), params)
result.raise_for_status()
except requests.exceptions.HTTPError:
if max_retries != 0 and retry_count >= max_retries:
print("Exceeded maximum retry count, aborting.")
exit(1)
else:
print("Request for entries starting at {} failed, retrying...".format(entries_processed))
retry_count += 1
continue
retry_count = 0
# Write fetched html to file, naming it [start-entry]-[end-entry]
with open(os.path.join(out_dir, "{}-{}-{}.html"
.format(log_type.value, entries_processed, entries_processed + process_chunk)), 'w') as result_html_file:
result_html_file.write(result.text)
reported_total_entries = _process_html(result.text, output_writer, log_type)
if total_entries == 0:
total_entries = reported_total_entries
entries_processed += process_chunk
else:
# We have a local set of previously-fetched files to use.
local_files = [file for file in os.listdir(local_files_path)
if os.path.isfile(os.path.join(local_files_path, file)) and file.startswith(log_type.value) and file.endswith(".html")]
for filename in local_files:
with open(os.path.join(local_files_path, filename), 'r') as file:
_process_html(file.read(), output_writer, log_type)
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Scrape GTPD crime/non-crime logs")
argparser.add_argument("--scrape-output-dir", help="Where to save the scraped data.", default="./scraped")
argparser.add_argument("--starting-offset", help="Collection offset from the most recent log entry.", default=0)
argparser.add_argument("--max-entries", help="Maximum number of entries to gather from starting point."
"0 means no limit.", default=0, type=int)
argparser.add_argument("--max-retries", help="Maximum number of retries. 0 means no limit.", default=5)
argparser.add_argument("--local-files", help="Directory containing a set of local html files that contain GTPD"
"crime tables.")
arg_list = sys.argv.copy()
arg_list.pop(0)
args = argparser.parse_args(arg_list)
scrape(scrape_output_dir=args.scrape_output_dir,
starting_offset=args.starting_offset,
max_entries=args.max_entries,
max_retries=args.max_retries,
local_files_path=args.local_files)
|
the-stack_0_9573 | import json
import zlib
def compress(message):
try:
        # json.dumps returns str; zlib.compress needs bytes, so encode first
        return zlib.compress(
            json.dumps(message,
                       separators=(',', ':')).encode('utf-8'),
            9
        )
except:
return ''
def decompress(message):
try:
return json.loads(
zlib.decompress(message)
)
except:
return {}
def create_id(ip_address):
return
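# --- Added usage sketch (not part of the original module) ---
# Round-trips a small payload through compress/decompress; the payload
# contents are arbitrary example data.
if __name__ == '__main__':
    payload = {'user': 'alice', 'scores': [1, 2, 3]}
    blob = compress(payload)
    restored = decompress(blob)
    print(restored == payload)  # expected: True when compression succeeded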
|
the-stack_0_9574 | import graphgallery
import tensorflow as tf
graphgallery.set_memory_growth()
print("GraphGallery version: ", graphgallery.__version__)
print("TensorFlow version: ", tf.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
from graphgallery.gallery import Deepwalk
trainer = Deepwalk(graph).process().build()
his = trainer.train(splits.train_nodes)
results = trainer.test(splits.test_nodes)
print(f'Test accuracy {results.accuracy:.2%}')
|
the-stack_0_9575 | '''
Simulation Based on Hippocampus Recordings
Copyright Nate Sutton 2015
References:
Data from CRCNS.org hc3 .
Izhikevich neuron parameters from:
http://f1000research.com/articles/3-104/v1
'''
import pylab
import nest
import math as math
import numpy as np
'''
Create objects to run experiment with
'''
multimeter = nest.Create("multimeter",10)
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
multimeter2 = nest.Create("multimeter")
nest.SetStatus(multimeter2, {"withtime":True, "record_from":["V_m"]})
spikedetector_e_c_3 = nest.Create("spike_detector", params={"withgid": True, "withtime": True})
spikedetector_e_c_5 = nest.Create("spike_detector", params={"withgid": True, "withtime": True})
spikedetector_c_a_1 = nest.Create("spike_detector", params={"withgid": True, "withtime": True})
'''noise = nest.Create("poisson_generator", 2)
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])'''
e_c_3_layer = nest.Create("izhikevich",500,{'V_m':-70.0,'I_e':-160.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
e_c_5_layer = nest.Create("izhikevich",500,{'V_m':-70.0,'I_e':-180.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
c_a_1_layer = nest.Create("izhikevich",500,{'V_m':-70.0,'I_e':-180.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
'''
Form connections between neurons and run sim
NOTE: I may need to split the neurons into Ex and In
groups in layers for connections
With a number of neuron mismatch between layers
how is that processed in connections?
'''
'''
Synapses
'''
spike_time_incr = 5.0
def createSyn(input_layer, output_layer, fire_rate_ratio, syn_weight, neuron_range):
'''
neuron_range: min_index, max_index, total_neurons
Note: later uneven numbers of neurons in layers
could be added but for now using even.
Ratio of 1.0 creates 50% ex and 50% inh
2.0 creates 66% ex and 33% inh
0.5 creates 33% ex and 66% inh
TODO: check if ratio calc works exactly right
TODO: for now synapses are one-to-one to control ratio of responses.
In the future more e.g. one-to-many should be made while controlling
activity between layers
    Note: the excitatory connections alone must account for all firing in
    each subsequent layer; no firing originates in any layer but the first.
'''
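    # Added worked example (illustrative numbers, not from the recordings):
    # fire_rate_ratio = 1.5  ->  times_greater_ratio = ceil(1.5) = 2 passes.
    # Pass 0 connects every input neuron in the range with delay 0.1 ms;
    # the final pass connects floor(len_in_layer * (1.5 - 1)) = half of them
    # with delay 0.1 + 5.0 ms, so each input spike drives ~1.5 output spikes.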
times_greater_ratio = math.ceil(fire_rate_ratio)
syn_dict = {"weight": syn_weight}
min_index = neuron_range[0]
max_index = neuron_range[1]
total_neurons = neuron_range[2]
total_range = range(math.floor((max_index-min_index)*total_neurons))
#print(total_range[-1])
total_range = np.array(total_range) + math.floor(min_index*total_neurons)
#print(total_range[-1])
len_in_layer = len(total_range)
#print(len_in_layer)
#len_out_layer = len(output_layer)
for time_greater in range(times_greater_ratio):
adjusted_delay = 0.1 + (spike_time_incr * time_greater)
adjusted_conn_total = len_in_layer
if (time_greater==(times_greater_ratio-1)):
adjusted_conn_total = math.floor(len_in_layer*(fire_rate_ratio-(times_greater_ratio-1)))
syn_dict = {"weight": syn_weight, "delay":adjusted_delay}
print(adjusted_conn_total)
for i in range(adjusted_conn_total):
#print(total_range[0])
#print(i)
n_i = total_range[i]
nest.Connect([input_layer[n_i]], [output_layer[n_i]], "one_to_one", syn_dict)
def eval_syn_weight(firing_ratio, initial_firing, region):
'''
Formula fitting tool used from here:
http://www.xuru.org/rt/MLR.asp
'''
if region == "ec3_to_ec5":
x1 = firing_ratio
x2 = initial_firing
w1 = 25.52143243
w2 = 1.057133539*(10**-2)
w3 = 25.75235337
y = w1 * x1 + w2 * x2 + w3
return y
elif region == "ec5_to_ca1":
x1 = firing_ratio
x2 = initial_firing
w1 = 5.128145363
w2 = 9.81585591*(10**-2)
w3 = 61.24183811
y = w1 * x1 + w2 * x2 + w3
return y
syn_weight = eval_syn_weight(1.4917, 582.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,1.4917, syn_weight, [0.0, 0.233, len(e_c_3_layer)])
syn_weight = eval_syn_weight(2.2081, 332.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,2.2081, syn_weight, [0.233, 0.367, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(2.2081, 500, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,2.2081, syn_weight, [0.367, 0.567, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(0.6152, 250, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,0.6152, syn_weight, [0.567, 0.667, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(0.3024, 167.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,0.3024, syn_weight, [0.667, 0.733, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(0.3024, 667.5, "ec3_to_ec5")
createSyn(e_c_3_layer,e_c_5_layer,0.3024, syn_weight, [0.733, 1.0, len(e_c_3_layer)])#
syn_weight = eval_syn_weight(6.8897, 865, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,6.8897, syn_weight, [0.0, 0.233, len(e_c_3_layer)])
syn_weight = eval_syn_weight(4.6546, 725, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,4.6546, syn_weight, [0.233, 0.367, len(e_c_3_layer)])
syn_weight = eval_syn_weight(1.6016, 1090, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,1.6016, syn_weight, [0.367, 0.567, len(e_c_3_layer)])
syn_weight = eval_syn_weight(5.7480, 195, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,5.7480, syn_weight, [0.567, 0.733, len(e_c_3_layer)])
syn_weight = eval_syn_weight(7.6722, 200, "ec5_to_ca1")
createSyn(e_c_5_layer,c_a_1_layer,7.6722, syn_weight, [0.733, 1.0, len(e_c_3_layer)])
nest.Connect(multimeter, e_c_3_layer)
nest.Connect(multimeter2, c_a_1_layer)
nest.Connect(e_c_3_layer, spikedetector_e_c_3)
nest.Connect(e_c_5_layer, spikedetector_e_c_5)
nest.Connect(c_a_1_layer, spikedetector_c_a_1)
'''
NOTE: filtering of spike counts after a certain
time happens later and therefore only a portion
of sim time is counted.
'''
nest.Simulate(2000.0)
'''
Record activity
'''
dmm = nest.GetStatus(multimeter)[0]
Vms = dmm["events"]["V_m"]
ts = dmm["events"]["times"]
dmm2 = nest.GetStatus(multimeter2)[0]
Vms2 = dmm2["events"]["V_m"]
ts2 = dmm2["events"]["times"]
'''
Plot results
'''
#pylab.figure(1)
#pylab.plot(ts, Vms)
#pylab.figure(2)
#pylab.plot(ts2, Vms2)
dSD = nest.GetStatus(spikedetector_e_c_3,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
print ('number of spikes')
print(sum(ts>800))
#pylab.figure(2)
#pylab.plot(ts, evs, ".")
dSD = nest.GetStatus(spikedetector_e_c_5,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
print ('number of spikes')
print(sum(ts>800))
#pylab.figure(3)
#pylab.plot(ts, evs, ".")
dSD = nest.GetStatus(spikedetector_c_a_1,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
print ('number of spikes')
print(sum(ts>800))
#pylab.figure(4)
#pylab.plot(ts, evs, ".")
pylab.show() |
the-stack_0_9576 | """
Copyright 2021 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging, kopf
from seeder_ccloud.openstack.openstack_helper import OpenstackHelper
from seeder_ccloud import utils
config = utils.Config()
@kopf.on.update(config.crd_info['plural'], annotations={'operatorVersion': config.operator_version}, field='spec.openstack.share_types')
@kopf.on.create(config.crd_info['plural'], annotations={'operatorVersion': config.operator_version}, field='spec.openstack.share_types')
def seed_share_types_handler(memo: kopf.Memo, new, old, name, annotations, **_):
logging.info('seeding {} share_types'.format(name))
if not config.is_dependency_successful(annotations):
raise kopf.TemporaryError('error seeding {}: {}'.format(name, 'dependencies error'), delay=30)
try:
changed = utils.get_changed_seeds(old, new)
Share_Types(memo['args'], memo['dry_run']).seed(changed)
except Exception as error:
raise kopf.TemporaryError('error seeding {}: {}'.format(name, error), delay=30)
class Share_Types():
def __init__(self, args, dry_run=False):
self.dry_run = dry_run
self.args = args
self.openstack = OpenstackHelper(args)
def seed(self, share_types):
logging.info('seeding manila share_types')
for share_type in share_types:
self._seed_share_types(share_type)
def _seed_share_types(self, share_type):
""" seed manila share type """
logging.debug("seeding Manila share type %s" % share_type)
# intialize manila client
try:
client = self.openstack.get_manilaclient("2.40")
manager = client.share_types
except Exception as e:
logging.error("Fail to initialize client: %s" % e)
raise
def get_type_by_name(name):
opts = {'all_tenants': 1}
for t in manager.list(search_opts=opts):
if t.name == name:
return t
return None
def validate_share_type(sharetype):
sharetype = self.openstack.sanitize(sharetype, [
'name', 'description', 'is_public', 'specs', 'extra_specs'])
specs = sharetype.pop('specs')
try:
sharetype['extra_specs'].update(specs)
except KeyError:
sharetype['extra_specs'] = specs
return sharetype
def update_type(stype, extra_specs):
to_be_unset = []
for k in list(stype.extra_specs.keys()):
if k not in list(extra_specs.keys()):
to_be_unset.append(k)
stype.unset_keys(to_be_unset)
stype.set_keys(extra_specs)
def create_type(sharetype):
extra_specs = sharetype['extra_specs']
try:
dhss = extra_specs.pop('driver_handles_share_servers')
sharetype['spec_driver_handles_share_servers'] = dhss
except KeyError:
pass
try:
snapshot_support = extra_specs.pop('snapshot_support')
sharetype['spec_snapshot_support'] = snapshot_support
except KeyError:
pass
sharetype['extra_specs'] = extra_specs
try:
manager.create(**sharetype)
except:
sharetype.pop('description')
manager.create(**sharetype)
# validation sharetype
share_type = validate_share_type(share_type)
logging.debug("Validated Manila share type %s" % share_type)
# update share type if exists
stype = get_type_by_name(share_type['name'])
if stype:
try:
if not self.dry_run:
update_type(stype, share_type['extra_specs'])
except Exception as e:
logging.error("Failed to update share type %s: %s" % (share_type, e))
raise
else:
try:
if not self.dry_run:
create_type(share_type)
except Exception as e:
logging.error("Failed to create share type %s: %s" % (share_type, e))
raise |
the-stack_0_9577 | import pipes
from galaxy import exceptions
from galaxy.util.none_like import NoneDataset
from galaxy.util import odict
from galaxy.util.object_wrapper import wrap_with_safe_string
from logging import getLogger
log = getLogger( __name__ )
# Fields in .log files corresponding to paths, must have one of the following
# field names and all such fields are assumed to be paths. This is to allow
# remote ComputeEnvironments (such as one used by LWR) determine what values to
# rewrite or transfer...
PATH_ATTRIBUTES = [ "path" ]
# ... by default though - don't rewrite anything (if no ComputeEnviornment
# defined or ComputeEnvironment doesn't supply a rewriter).
DEFAULT_PATH_REWRITER = lambda x: x
class ToolParameterValueWrapper( object ):
"""
Base class for object that Wraps a Tool Parameter and Value.
"""
def __nonzero__( self ):
return bool( self.value )
def get_display_text( self, quote=True ):
"""
Returns a string containing the value that would be displayed to the user in the tool interface.
When quote is True (default), the string is escaped for e.g. command-line usage.
"""
rval = self.input.value_to_display_text( self.value, self.input.tool.app ) or ''
if quote:
return pipes.quote( rval ) or "''" # pipes.quote in Python < 2.7 returns an empty string instead of the expected quoted empty string
return rval
class RawObjectWrapper( ToolParameterValueWrapper ):
"""
Wraps an object so that __str__ returns module_name:class_name.
"""
def __init__( self, obj ):
self.obj = obj
def __nonzero__( self ):
return bool( self.obj ) # FIXME: would it be safe/backwards compatible to rename .obj to .value, so that we can just inherit this method?
def __str__( self ):
try:
return "%s:%s" % (self.obj.__module__, self.obj.__class__.__name__)
except:
#Most likely None, which lacks __module__.
return str( self.obj )
def __getattr__( self, key ):
return getattr( self.obj, key )
class LibraryDatasetValueWrapper( ToolParameterValueWrapper ):
"""
Wraps an input so that __str__ gives the "param_dict" representation.
"""
def __init__( self, input, value, other_values={} ):
self.input = input
self.value = value
self._other_values = other_values
self.counter = 0
def __str__( self ):
return self.value
def __iter__( self ):
return self
def next( self ):
if self.counter >= len(self.value):
raise StopIteration
self.counter += 1
return self.value[ self.counter - 1 ]
def __getattr__( self, key ):
return getattr( self.value, key )
class InputValueWrapper( ToolParameterValueWrapper ):
"""
Wraps an input so that __str__ gives the "param_dict" representation.
"""
def __init__( self, input, value, other_values={} ):
self.input = input
self.value = value
self._other_values = other_values
def __str__( self ):
return self.input.to_param_dict_string( self.value, self._other_values )
def __getattr__( self, key ):
return getattr( self.value, key )
class SelectToolParameterWrapper( ToolParameterValueWrapper ):
"""
Wraps a SelectTooParameter so that __str__ returns the selected value, but all other
attributes are accessible.
"""
class SelectToolParameterFieldWrapper:
"""
Provide access to any field by name or index for this particular value.
Only applicable for dynamic_options selects, which have more than simple 'options' defined (name, value, selected).
"""
def __init__( self, input, value, other_values, path_rewriter ):
self._input = input
self._value = value
self._other_values = other_values
self._fields = {}
self._path_rewriter = path_rewriter
def __getattr__( self, name ):
if name not in self._fields:
self._fields[ name ] = self._input.options.get_field_by_name_for_value( name, self._value, None, self._other_values )
values = map( str, self._fields[ name ] )
if name in PATH_ATTRIBUTES:
# If we infer this is a path, rewrite it if needed.
values = map( self._path_rewriter, values )
return self._input.separator.join( values )
def __init__( self, input, value, app, other_values={}, path_rewriter=None ):
self.input = input
self.value = value
self.input.value_label = input.value_to_display_text( value, app )
self._other_values = other_values
self._path_rewriter = path_rewriter or DEFAULT_PATH_REWRITER
self.fields = self.SelectToolParameterFieldWrapper( input, value, other_values, self._path_rewriter )
def __str__( self ):
# Assuming value is never a path - otherwise would need to pass
# along following argument value_map=self._path_rewriter.
return self.input.to_param_dict_string( self.value, other_values=self._other_values )
def __getattr__( self, key ):
return getattr( self.input, key )
class DatasetFilenameWrapper( ToolParameterValueWrapper ):
"""
Wraps a dataset so that __str__ returns the filename, but all other
attributes are accessible.
"""
class MetadataWrapper:
"""
Wraps a Metadata Collection to return MetadataParameters wrapped
according to the metadata spec. Methods implemented to match behavior
of a Metadata Collection.
"""
def __init__( self, metadata ):
self.metadata = metadata
def __getattr__( self, name ):
rval = self.metadata.get( name, None )
if name in self.metadata.spec:
if rval is None:
rval = self.metadata.spec[name].no_value
rval = self.metadata.spec[ name ].param.to_safe_string( rval )
# Store this value, so we don't need to recalculate if needed
# again
setattr( self, name, rval )
else:
#escape string value of non-defined metadata value
rval = wrap_with_safe_string( rval )
return rval
def __nonzero__( self ):
return self.metadata.__nonzero__()
def __iter__( self ):
return self.metadata.__iter__()
def get( self, key, default=None ):
try:
return getattr( self, key )
except:
return default
def items( self ):
return iter( [ ( k, self.get( k ) ) for k, v in self.metadata.items() ] )
def __init__( self, dataset, datatypes_registry=None, tool=None, name=None, dataset_path=None, identifier=None ):
if not dataset:
try:
# TODO: allow this to work when working with grouping
ext = tool.inputs[name].extensions[0]
except:
ext = 'data'
self.dataset = wrap_with_safe_string( NoneDataset( datatypes_registry=datatypes_registry, ext=ext ), no_wrap_classes=ToolParameterValueWrapper )
else:
# Tool wrappers should not normally be accessing .dataset directly,
# so we will wrap it and keep the original around for file paths
# Should we name this .value to maintain consistency with most other ToolParameterValueWrapper?
self.unsanitized = dataset
self.dataset = wrap_with_safe_string( dataset, no_wrap_classes=ToolParameterValueWrapper )
self.metadata = self.MetadataWrapper( dataset.metadata )
self.datatypes_registry = datatypes_registry
self.false_path = getattr( dataset_path, "false_path", None )
self.false_extra_files_path = getattr( dataset_path, "false_extra_files_path", None )
self._element_identifier = identifier
@property
def element_identifier( self ):
identifier = self._element_identifier
if identifier is None:
identifier = self.name
return identifier
@property
def is_collection( self ):
return False
def is_of_type( self, *exts ):
datatypes = [ self.datatypes_registry.get_datatype_by_extension( e ) for e in exts ]
return self.dataset.datatype.matches_any( datatypes )
def __str__( self ):
if self.false_path is not None:
return self.false_path
else:
return self.unsanitized.file_name
def __getattr__( self, key ):
if self.false_path is not None and key == 'file_name':
# Path to dataset was rewritten for this job.
return self.false_path
elif self.false_extra_files_path is not None and key == 'extra_files_path':
# Path to extra files was rewritten for this job.
return self.false_extra_files_path
elif key == 'extra_files_path':
try:
# Assume it is an output and that this wrapper
# will be set with correct "files_path" for this
# job.
return self.files_path
except AttributeError:
# Otherwise, we have an input - delegate to model and
# object store to find the static location of this
# directory.
try:
return self.unsanitized.extra_files_path
except exceptions.ObjectNotFound:
# NestedObjectstore raises an error here
# instead of just returning a non-existent
# path like DiskObjectStore.
raise
else:
return getattr( self.dataset, key )
def __nonzero__( self ):
return bool( self.dataset )
class HasDatasets:
def _dataset_wrapper( self, dataset, dataset_paths, **kwargs ):
wrapper_kwds = kwargs.copy()
if dataset:
real_path = dataset.file_name
if real_path in dataset_paths:
wrapper_kwds[ "dataset_path" ] = dataset_paths[ real_path ]
return DatasetFilenameWrapper( dataset, **wrapper_kwds )
class DatasetListWrapper( list, ToolParameterValueWrapper, HasDatasets ):
"""
"""
def __init__( self, datasets, dataset_paths=[], **kwargs ):
if not isinstance(datasets, list):
datasets = [datasets]
def to_wrapper( dataset ):
if hasattr(dataset, "element_identifier"):
element = dataset
dataset = element.dataset_instance
kwargs["identifier"] = element.element_identifier
return self._dataset_wrapper( dataset, dataset_paths, **kwargs )
list.__init__( self, map( to_wrapper, datasets ) )
def __str__( self ):
return ','.join( map( str, self ) )
class DatasetCollectionWrapper( ToolParameterValueWrapper, HasDatasets ):
def __init__( self, has_collection, dataset_paths=[], **kwargs ):
super(DatasetCollectionWrapper, self).__init__()
if has_collection is None:
self.__input_supplied = False
return
else:
self.__input_supplied = True
if hasattr( has_collection, "name" ):
# It is a HistoryDatasetCollectionAssociation
collection = has_collection.collection
self.name = has_collection.name
elif hasattr( has_collection, "child_collection" ):
# It is a DatasetCollectionElement instance referencing another collection
collection = has_collection.child_collection
self.name = has_collection.element_identifier
else:
collection = has_collection
self.name = None
elements = collection.elements
element_instances = odict.odict()
element_instance_list = []
for dataset_collection_element in elements:
element_object = dataset_collection_element.element_object
element_identifier = dataset_collection_element.element_identifier
if dataset_collection_element.is_collection:
element_wrapper = DatasetCollectionWrapper( dataset_collection_element, dataset_paths, **kwargs )
else:
element_wrapper = self._dataset_wrapper( element_object, dataset_paths, **kwargs)
element_instances[element_identifier] = element_wrapper
element_instance_list.append( element_wrapper )
self.__element_instances = element_instances
self.__element_instance_list = element_instance_list
def keys( self ):
if not self.__input_supplied:
return []
return self.__element_instances.keys()
@property
def is_collection( self ):
return True
@property
def is_input_supplied( self ):
return self.__input_supplied
def __getitem__( self, key ):
if not self.__input_supplied:
return None
if isinstance( key, int ):
return self.__element_instance_list[ key ]
else:
return self.__element_instances[ key ]
def __getattr__( self, key ):
if not self.__input_supplied:
return None
return self.__element_instances[ key ]
def __iter__( self ):
if not self.__input_supplied:
return [].__iter__()
return self.__element_instance_list.__iter__()
def __nonzero__( self ):
        # Fail `#if $param` checks in cheetah if the optional input is
        # not specified or if the resulting collection is empty.
return self.__input_supplied and bool( self.__element_instance_list )
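
if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module); it assumes the
    # module's own Galaxy imports resolve. RawObjectWrapper is self-contained,
    # so it is the easiest wrapper to demonstrate.
    class _Example(object):
        pass
    wrapped = RawObjectWrapper(_Example())
    print(str(wrapped))   # -> "__main__:_Example"
    print(bool(wrapped))  # -> True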
|
the-stack_0_9578 | #!/usr/bin/env python2.7
# File: taxii_parser.py
#
# Copyright (c) 2014-2016 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# This file contains the code to parse a STIX xml file.
import sys
import simplejson as json
import cStringIO
import libtaxii as lt
import stix_parser as sp
def parse_taxii_message(taxii_message, base_connector=None):
number_of_cbs = len(taxii_message.content_blocks)
if (not number_of_cbs):
        return {'error': 'no content blocks found'}
packages = []
for i, cb in enumerate(taxii_message.content_blocks):
if (base_connector):
base_connector.send_progress("Parsing Content Block # {0}".format(i))
# Give it to the stix parser to create the containers and artifacts
# This code is the only place where the stix parsing will be written
stix_xml = cb.content
cstrio = cStringIO.StringIO()
cstrio.write(stix_xml)
cstrio.seek(0)
package = sp.parse_stix(cstrio, base_connector)
if (package):
# print (json.dumps(package, indent=' ' * 4))
packages.append(package)
return sp.parse_packages(packages, base_connector)
if __name__ == '__main__':
import pudb
pudb.set_trace()
results = None
with open(sys.argv[1]) as f:
# first try to parse it as a taxii message
try:
taxii_msg = lt.tm11.get_message_from_xml(f.read())
except:
            # Now as a stix document
try:
f.seek(0)
package = sp.parse_stix(f, None)
if (package):
packages = [package]
results = sp.parse_packages(packages, None)
except:
raise
else:
results = parse_taxii_message(taxii_msg, None)
# import pprint;pprint.pprint(results)
with open('./taxii-parsed.json', 'w') as f:
f.write(json.dumps(results, indent=' ' * 4))
|
the-stack_0_9579 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import py3to2
from robot.variables import VariableIterator
@py3to2
class Token(object):
"""Token representing piece of Robot Framework data.
Each token has type, value, line number, column offset and end column
offset in :attr:`type`, :attr:`value`, :attr:`lineno`, :attr:`col_offset`
and :attr:`end_col_offset` attributes, respectively. Tokens representing
    errors also have their error message in the :attr:`error` attribute.
Token types are declared as class attributes such as :attr:`SETTING_HEADER`
and :attr:`EOL`. Values of these constants have changed slightly in Robot
Framework 4.0 and they may change again in the future. It is thus safer
to use the constants, not their values, when types are needed. For example,
use ``Token(Token.EOL)`` instead of ``Token('EOL')`` and
``token.type == Token.EOL`` instead of ``token.type == 'EOL'``.
If :attr:`value` is not given when :class:`Token` is initialized and
:attr:`type` is :attr:`IF`, :attr:`ELSE_IF`, :attr:`ELSE`, :attr:`FOR`,
:attr:`END`, :attr:`WITH_NAME` or :attr:`CONTINUATION`, the value is
automatically set to the correct marker value like ``'IF'`` or ``'ELSE IF'``.
If :attr:`type` is :attr:`EOL` in this case, the value is set to ``'\\n'``.
"""
SETTING_HEADER = 'SETTING HEADER'
VARIABLE_HEADER = 'VARIABLE HEADER'
TESTCASE_HEADER = 'TESTCASE HEADER'
KEYWORD_HEADER = 'KEYWORD HEADER'
COMMENT_HEADER = 'COMMENT HEADER'
TESTCASE_NAME = 'TESTCASE NAME'
KEYWORD_NAME = 'KEYWORD NAME'
DOCUMENTATION = 'DOCUMENTATION'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
METADATA = 'METADATA'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
FORCE_TAGS = 'FORCE TAGS'
DEFAULT_TAGS = 'DEFAULT TAGS'
LIBRARY = 'LIBRARY'
RESOURCE = 'RESOURCE'
VARIABLES = 'VARIABLES'
SETUP = 'SETUP'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TIMEOUT = 'TIMEOUT'
TAGS = 'TAGS'
ARGUMENTS = 'ARGUMENTS'
RETURN = 'RETURN'
NAME = 'NAME'
VARIABLE = 'VARIABLE'
ARGUMENT = 'ARGUMENT'
ASSIGN = 'ASSIGN'
KEYWORD = 'KEYWORD'
WITH_NAME = 'WITH NAME'
FOR = 'FOR'
FOR_SEPARATOR = 'FOR SEPARATOR'
END = 'END'
IF = 'IF'
ELSE_IF = 'ELSE IF'
ELSE = 'ELSE'
SEPARATOR = 'SEPARATOR'
COMMENT = 'COMMENT'
CONTINUATION = 'CONTINUATION'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
FATAL_ERROR = 'FATAL ERROR'
NON_DATA_TOKENS = frozenset((
SEPARATOR,
COMMENT,
CONTINUATION,
EOL,
EOS
))
SETTING_TOKENS = frozenset((
DOCUMENTATION,
SUITE_SETUP,
SUITE_TEARDOWN,
METADATA,
TEST_SETUP,
TEST_TEARDOWN,
TEST_TEMPLATE,
TEST_TIMEOUT,
FORCE_TAGS,
DEFAULT_TAGS,
LIBRARY,
RESOURCE,
VARIABLES,
SETUP,
TEARDOWN,
TEMPLATE,
TIMEOUT,
TAGS,
ARGUMENTS,
RETURN
))
HEADER_TOKENS = frozenset((
SETTING_HEADER,
VARIABLE_HEADER,
TESTCASE_HEADER,
KEYWORD_HEADER,
COMMENT_HEADER
))
ALLOW_VARIABLES = frozenset((
NAME,
ARGUMENT,
TESTCASE_NAME,
KEYWORD_NAME
))
__slots__ = ['type', 'value', 'lineno', 'col_offset', 'error']
def __init__(self, type=None, value=None, lineno=-1, col_offset=-1, error=None):
self.type = type
if value is None:
value = {
Token.IF: 'IF', Token.ELSE_IF: 'ELSE IF', Token.ELSE: 'ELSE',
Token.FOR: 'FOR', Token.END: 'END', Token.CONTINUATION: '...',
Token.EOL: '\n', Token.WITH_NAME: 'WITH NAME'
}.get(type, '')
self.value = value
self.lineno = lineno
self.col_offset = col_offset
self.error = error
@property
def end_col_offset(self):
if self.col_offset == -1:
return -1
return self.col_offset + len(self.value)
def set_error(self, error, fatal=False):
self.type = Token.ERROR if not fatal else Token.FATAL_ERROR
self.error = error
def tokenize_variables(self):
"""Tokenizes possible variables in token value.
Yields the token itself if the token does not allow variables (see
:attr:`Token.ALLOW_VARIABLES`) or its value does not contain
variables. Otherwise yields variable tokens as well as tokens
before, after, or between variables so that they have the same
type as the original token.
"""
if self.type not in Token.ALLOW_VARIABLES:
return self._tokenize_no_variables()
variables = VariableIterator(self.value)
if not variables:
return self._tokenize_no_variables()
return self._tokenize_variables(variables)
def _tokenize_no_variables(self):
yield self
def _tokenize_variables(self, variables):
lineno = self.lineno
col_offset = self.col_offset
remaining = ''
for before, variable, remaining in variables:
if before:
yield Token(self.type, before, lineno, col_offset)
col_offset += len(before)
yield Token(Token.VARIABLE, variable, lineno, col_offset)
col_offset += len(variable)
if remaining:
yield Token(self.type, remaining, lineno, col_offset)
def __str__(self):
return self.value
def __repr__(self):
type_ = self.type.replace(' ', '_') if self.type else 'None'
error = '' if not self.error else ', %r' % self.error
return 'Token(%s, %r, %s, %s%s)' % (type_, self.value, self.lineno,
self.col_offset, error)
def __eq__(self, other):
return (isinstance(other, Token)
and self.type == other.type
and self.value == other.value
and self.lineno == other.lineno
and self.col_offset == other.col_offset
and self.error == other.error)
def __ne__(self, other):
return not self == other
class EOS(Token):
"""Token representing end of a statement."""
__slots__ = []
def __init__(self, lineno=-1, col_offset=-1):
Token.__init__(self, Token.EOS, '', lineno, col_offset)
@classmethod
def from_token(cls, token):
return EOS(lineno=token.lineno, col_offset=token.end_col_offset)
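
if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module); it assumes
    # Robot Framework's robot.utils and robot.variables are importable.
    # ARGUMENT tokens allow variables, so tokenize_variables() splits the value
    # into plain-text and VARIABLE sub-tokens.
    arg = Token(Token.ARGUMENT, 'Hello, ${name}!', lineno=1, col_offset=0)
    for sub_token in arg.tokenize_variables():
        print(repr(sub_token))
    # When no value is given, marker types get a default value automatically.
    print(repr(Token(Token.EOL)))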
|
the-stack_0_9580 | from __future__ import absolute_import
import logging
import re
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.conf.urls import patterns, include, url
from sentry.plugins.base import plugins
logger = logging.getLogger("sentry.plugins")
def ensure_url(u):
if isinstance(u, (tuple, list)):
return url(*u)
elif not isinstance(u, (RegexURLResolver, RegexURLPattern)):
raise TypeError(
"url must be RegexURLResolver or RegexURLPattern, not %r: %r" % (type(u).__name__, u)
)
return u
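
# Illustrative note (not part of the original module): ensure_url accepts either
# an already-built pattern/resolver or the (regex, view, ...) tuple form, e.g.
# ensure_url((r'^settings/$', my_view)) behaves like url(r'^settings/$', my_view),
# where my_view is a hypothetical view callable.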
def load_plugin_urls(plugins):
urlpatterns = patterns("")
for plugin in plugins:
try:
urls = plugin.get_project_urls()
if not urls:
continue
urls = [ensure_url(u) for u in urls]
except Exception:
logger.exception("routes.failed", extra={"plugin": type(plugin).__name__})
else:
urlpatterns.append(url(r"^%s/" % re.escape(plugin.slug), include(urls)))
return urlpatterns
urlpatterns = load_plugin_urls(plugins.all())
|
the-stack_0_9585 | """ Since we also expect the answers in the SQuAD format, we reuse its code """
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
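
# Worked example (illustrative, not part of the original script): for the
# prediction "the cat sat" and the ground truth "a cat sat down", normalization
# drops the articles, the shared tokens are {"cat", "sat"}, so
# precision = 2/2 = 1.0, recall = 2/3, and F1 = 2 * (1.0 * 2/3) / (1.0 + 2/3) = 0.8.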
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for dp in dataset:
for qa in dp['qa']:
total += 1
if qa['id'] not in predictions:
message = 'Question id ' + qa['id'] + \
' not present. Will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = ['NA'] if len(qa['answers']) == 0 else qa['answers']
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluation for DuoRC')
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset = json.load(dataset_file)
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions))) |
the-stack_0_9586 | #! /usr/bin/env python3
"""Simple module/script showing argparse"""
from argparse import ArgumentParser
def get_args():
"""argument parser for argparse_example"""
parser = ArgumentParser()
parser.add_argument("--datadir", required=False, type=str,
default="/logs/MECS", help="path to data directory")
parser.add_argument("--threads", "-j", required=False, type=int,
help="number of threads", default=5)
parser.add_argument("--verbose", "-v", required=False,
action="store_true",
help="used to produce more verbose output")
args = parser.parse_args()
return args
def main(args):
"""main function for argparse_example
splitting the args out separately allows this to be reused in programs"""
print("The args namespace is {}".format(args))
if __name__ == "__main__":
args = get_args()
main(args)
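
# Example invocation (illustrative; the path below is hypothetical):
#   python argparse_example.py --datadir /tmp/MECS -j 8 -v
# which prints something like:
#   The args namespace is Namespace(datadir='/tmp/MECS', threads=8, verbose=True)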
|
the-stack_0_9587 | #!/usr/bin/env python3
import os
# requires prompt-toolkit > 2
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.application.current import get_app
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.styles import Style
global_kb = KeyBindings()
# if bottom toolbar is visible
global_toolbar = None
# color the application
style = Style.from_dict({
# reset bottom_toolbar style
'bottom-toolbar': 'noreverse gray bg:black',
'bottom-toolbar label': 'black bg:darkcyan'
})
def get_mainbar():
""" return keybar adding missing empty keys
"""
mainbar = [
(1, 'Help'),
(10, 'Quit'),
]
# there could be 12 keys at the bottom
keysno = max(10, max(m[0] for m in mainbar))
# add missing keys to keybar
for idx in range(1, keysno+1): # 1-10 or 1-12
if len(mainbar) >= idx:
#print(len(mainbar), idx, mainbar[idx-1])
if mainbar[idx-1][0] == idx:
continue
else:
mainbar.insert(idx-1, (idx, ''))
else: # no more buttons
mainbar.append((idx, ''))
#print(mainbar)
return mainbar
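
# Worked example (illustrative note, not part of the original script): with the
# mainbar defined above, [(1, 'Help'), (10, 'Quit')], the loop pads the gaps and
# returns [(1, 'Help'), (2, ''), (3, ''), ..., (9, ''), (10, 'Quit')].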
def bottom_toolbar():
keybar = get_mainbar()
minlabel = 6
labelled = []
for key, label in keybar:
if len(label) > minlabel: # then trim label
label = label[:minlabel]
# {:{}} means width is specified by argument
labelled.append("{}<label>{:{w}}</label>".format(
key, label, w=minlabel))
return HTML(' '.join(labelled))
@global_kb.add('c-b') # Ctrl+B
def switch_toolbar(event):
global global_toolbar, session
if not global_toolbar:
global_toolbar = bottom_toolbar
else:
global_toolbar = None
session.bottom_toolbar = global_toolbar
@global_kb.add('f10') # F10
def quit(event):
get_app().exit(exception=EOFError) # mimic Ctrl-D
session = PromptSession(os.getcwd() + ">", key_bindings=global_kb, bottom_toolbar=bottom_toolbar, style=style)
while True:
try:
text = session.prompt()
except KeyboardInterrupt: # [ ] in Far Ctrl+C does nothing
break
except EOFError: # [ ] in Far Ctrl+D does nothing
break
else:
print("[debug] " + text)
|
the-stack_0_9588 | from datetime import date
from datetime import timedelta
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class CovidNineteen:
def get_latest_daily_report(self):
"""
Get latest daily report(world) from:
https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports
"""
data_date = date.today()
data_date_delta = timedelta(days=1)
daily_report_url_no_date = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{}.csv"
while True:
data_date_str = date.strftime(data_date, '%m-%d-%Y')
daily_report_url = daily_report_url_no_date.format(data_date_str)
try:
print("Trying to get {} daily report.".format(data_date_str))
daily_report = pd.read_csv(daily_report_url)
print("The file exists๏ผgot {} daily report.".format(data_date_str))
break
except:
print("{} hasn't uploaded yet.".format(data_date_str))
data_date -= data_date_delta # data_date = data_date - data_date_delta
return daily_report
def get_time_series(self):
"""
Get time series data from:
https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
"""
time_series = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
return time_series
covid_19 = CovidNineteen()
daily_report = covid_19.get_latest_daily_report()
time_series = covid_19.get_time_series()
time_series.head()
idVars =['Province/State', 'Country/Region', 'Lat', 'Long']
time_series_long = pd.melt(time_series, id_vars=idVars, var_name='Date', value_name='Confirmed')
time_series_long['Date'] = pd.to_datetime(time_series_long['Date'])
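# Illustrative note (not part of the original script): pd.melt reshapes the wide
# table (one column per date) into long format with one Confirmed value per
# region and date, which is what the groupby/sum below expects.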
country_confirmed_groupby = time_series_long.groupby(['Date', 'Country/Region'])['Confirmed'].sum()
df_country_confirmed = pd.DataFrame(country_confirmed_groupby).reset_index()
country_confirmed = df_country_confirmed.sort_values('Confirmed', ascending=True)
#============================= Make a matplotlib picture =========================================
#us = country_confirmed[country_confirmed['Country/Region'].str.contains('US')]
#If we use USA plot on this pic, we won't see the other Country's progress.
cn = country_confirmed[country_confirmed['Country/Region'].str.contains('China')]
jpn = country_confirmed[country_confirmed['Country/Region'].str.contains('Japan')]
kr = country_confirmed[country_confirmed['Country/Region'].str.contains('Korea, South')]
tw = country_confirmed[country_confirmed['Country/Region'].str.contains('Taiwan')]
plt.figure(figsize=(20, 5))
plt.title('Covid 19 Confirmed Results', fontsize=20)
plt.xlabel('Date', fontsize=20)
plt.ylabel('Population', fontsize=20)
#plt.plot(us['Date'], us['Confirmed'], label='USA')
plt.plot(cn['Date'], cn['Confirmed'], label='China')
plt.plot(jpn['Date'], jpn['Confirmed'], label='Japan')
plt.plot(kr['Date'], kr['Confirmed'], label='Korea South')
plt.plot(tw['Date'], tw['Confirmed'], label='Taiwan')
plt.legend(loc=2)
plt.show()
|
the-stack_0_9593 | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tรจs <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from .object import Object
from .primitives import Int, Long
class Message(Object):
ID = 0x5bb8e511 # hex(crc32(b"message msg_id:long seqno:int bytes:int body:Object = Message"))
def __init__(self, body: Object, msg_id: int, seq_no: int, length: int):
self.msg_id = msg_id
self.seq_no = seq_no
self.length = length
self.body = body
@staticmethod
def read(b: BytesIO, *args) -> "Message":
msg_id = Long.read(b)
seq_no = Int.read(b)
length = Int.read(b)
body = b.read(length)
return Message(Object.read(BytesIO(body)), msg_id, seq_no, length)
def write(self) -> bytes:
b = BytesIO()
b.write(Long(self.msg_id))
b.write(Int(self.seq_no))
b.write(Int(self.length))
b.write(self.body.write())
return b.getvalue()
|
the-stack_0_9594 | from models.app_config import ApplicationGroupConfig
from modules.configs.config_factory import ConfigFactory
from modules.runner.app_runner import ApplicationRunner
from pick import pick
def pick_config(app_configs: list[ApplicationGroupConfig]):
title = 'Choose which workflow config to start: '
config_options = list(map(lambda c: c.get_config_name(), app_configs))
option, index = pick(config_options, title, indicator='=> ')
return app_configs[index]
def main():
group_configs = ConfigFactory.get_default_parser().get_config()
chosen_group_config = pick_config(group_configs)
app_runner = ApplicationRunner(chosen_group_config)
app_runner.spawn_work_spaces()
input("Press enter to confirm all the windows have started (you config will be restored)")
app_runner.restore_config()
if __name__ == '__main__':
main()
|
the-stack_0_9597 | import pandas as pd
import matplotlib.pyplot as plt
import csv
query = "query-1"
auto_scaler = "HPA"
percentage = "80"
path_to_file = "../experiment_data_processed/full_data/" + query + "_" + auto_scaler + "_" + percentage + ".csv"
df = pd.read_csv(path_to_file)
taskmanager = df['taskmanager'].tolist()
latency = df['latency'].tolist()
previous_number_taskmanagers = taskmanager[0]
scaling_events = 0
for val in taskmanager:
if val != previous_number_taskmanagers:
scaling_events += 1
previous_number_taskmanagers = val
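
# Worked example (illustrative): for taskmanager counts [2, 2, 3, 3, 2] the loop
# above counts 2 scaling events (2 -> 3 and 3 -> 2).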
average_latency = sum(latency) / len(latency)
average_taskmanager = sum(taskmanager) / len(taskmanager)
with open("../experiment_data_processed/evaluation_metrics/" + query + "_" + auto_scaler + "_" + percentage + ".csv", 'w') as f:
# create the csv writer
writer = csv.writer(f)
writer.writerow(["latency", average_latency])
writer.writerow(["taskmanager", average_taskmanager])
writer.writerow(["scaling_events", scaling_events]) |
the-stack_0_9599 | """
Current-flow closeness centrality measures.
"""
# Copyright (C) 2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg <[email protected]>"""
__all__ = ['current_flow_closeness_centrality','information_centrality']
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
def current_flow_closeness_centrality(G, normalized=True, weight='weight',
dtype=float, solver='lu'):
"""Compute current-flow closeness centrality for nodes.
A variant of closeness centrality based on effective
resistance between nodes in a network. This metric
is also known as information centrality.
Parameters
----------
G : graph
A NetworkX graph
normalized : bool, optional
If True the values are normalized by 1/(n-1) where n is the
number of nodes in G.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with current flow closeness centrality as the value.
See Also
--------
closeness_centrality
Notes
-----
The algorithm is from Brandes [1]_.
See also [2]_ for the original definition of information centrality.
References
----------
.. [1] Ulrik Brandes and Daniel Fleischer,
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] Stephenson, K. and Zelen, M.
Rethinking centrality: Methods and examples.
Social Networks. Volume 11, Issue 1, March 1989, pp. 1-37
http://dx.doi.org/10.1016/0378-8733(89)90016-6
"""
from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        raise ImportError('current_flow_closeness_centrality requires NumPy, '
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_closeness_centrality requires SciPy, '
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError(
            "current_flow_closeness_centrality() not defined for digraphs.")
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername={"full" :FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G,dict(zip(ordering,range(n))))
betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
n = G.number_of_nodes()
L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
dtype=dtype, format='csc')
C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
for v in H:
col=C2.get_row(v)
for w in H:
betweenness[v]+=col[v]-2*col[w]
betweenness[w]+=col[v]
if normalized:
nb=len(betweenness)-1.0
else:
nb=1.0
for v in H:
betweenness[v]=nb/(betweenness[v])
return dict((ordering[k],float(v)) for k,v in betweenness.items())
information_centrality=current_flow_closeness_centrality
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
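
if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module); it assumes
    # networkx, NumPy and SciPy are installed. A 4-cycle is undirected and
    # connected, so it satisfies the function's requirements.
    G = nx.cycle_graph(4)
    print(current_flow_closeness_centrality(G))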
|
the-stack_0_9600 | """
disk_dict.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import cPickle
from w3af.core.data.misc.cpickle_dumps import cpickle_dumps
from w3af.core.data.fuzzer.utils import rand_alpha
from w3af.core.data.db.dbms import get_default_temp_db_instance
class DiskDict(object):
"""
It's a dict that stores items in a sqlite3 database and has the following
features:
- Dict-like API
- Is thread safe
- Deletes the table when the instance object is deleted
:author: Andres Riancho ([email protected])
"""
def __init__(self, table_prefix=None):
self.db = get_default_temp_db_instance()
prefix = '' if table_prefix is None else ('%s_' % table_prefix)
self.table_name = 'disk_dict_' + prefix + rand_alpha(30)
# Create table
# DO NOT add the AUTOINCREMENT flag to the table creation since that
# will break __getitem__ when an item is removed, see:
# http://www.sqlite.org/faq.html#q1
columns = [('index_', 'INTEGER'),
('key', 'BLOB'),
('value', 'BLOB')]
pks = ['index_']
self.db.create_table(self.table_name, columns, pks)
self.db.create_index(self.table_name, ['key'])
self.db.commit()
def cleanup(self):
self.db.drop_table(self.table_name)
def keys(self):
pickled_keys = self.db.select('SELECT key FROM %s' % self.table_name)
result_list = []
for r in pickled_keys:
result_list.append(cPickle.loads(r[0]))
return result_list
def iterkeys(self):
pickled_keys = self.db.select('SELECT key FROM %s' % self.table_name)
for r in pickled_keys:
yield cPickle.loads(r[0])
def iteritems(self):
pickled_keys = self.db.select('SELECT key, value FROM %s' % self.table_name)
for r in pickled_keys:
yield cPickle.loads(r[0]), cPickle.loads(r[1])
def __contains__(self, key):
"""
:return: True if the value is in keys
"""
# Adding the "limit 1" to the query makes it faster, as it won't
# have to scan through all the table/index, it just stops on the
# first match.
query = 'SELECT count(*) FROM %s WHERE key=? limit 1' % self.table_name
r = self.db.select_one(query, (cpickle_dumps(key),))
return bool(r[0])
def __delitem__(self, key):
"""
Delete the key from the dict
:param key: The key to delete
:return: None
"""
query = 'DELETE FROM %s WHERE key = ?' % self.table_name
self.db.execute(query, (cpickle_dumps(key),))
def __setitem__(self, key, value):
# Test if it is already in the DB:
if key in self:
query = 'UPDATE %s SET value = ? WHERE key=?' % self.table_name
self.db.execute(query, (cpickle_dumps(value),
cpickle_dumps(key)))
else:
query = "INSERT INTO %s VALUES (NULL, ?, ?)" % self.table_name
self.db.execute(query, (cpickle_dumps(key),
cpickle_dumps(value)))
def __getitem__(self, key):
query = 'SELECT value FROM %s WHERE key=? limit 1' % self.table_name
r = self.db.select(query, (cpickle_dumps(key),))
if not r:
args = (key, self.table_name)
raise KeyError('%s not in %s.' % args)
return cPickle.loads(r[0][0])
def __len__(self):
query = 'SELECT count(*) FROM %s' % self.table_name
r = self.db.select_one(query)
return r[0]
def get(self, key, default=-456):
try:
return self[key]
except KeyError:
            if default != -456:
return default
raise KeyError()
def pop(self, key, default=-456):
value = self.get(key, default=default)
del self[key]
return value
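
if __name__ == '__main__':
    # Illustrative usage sketch (not part of w3af); it only runs inside a
    # configured w3af environment, because DiskDict persists its items to the
    # default temporary SQLite database.
    dd = DiskDict(table_prefix='example')
    dd['spam'] = [1, 2, 3]
    print(dd['spam'])      # [1, 2, 3]
    print('spam' in dd)    # True
    print(len(dd))         # 1
    dd.cleanup()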
|
the-stack_0_9603 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""ploting tools."""
import numpy as np
import nibabel as nb
import pandas as pd
from nilearn.signal import clean
import matplotlib.pyplot as plt
from matplotlib import gridspec as mgs
import seaborn as sns
from niworkflows.viz.plots import plot_carpet as plot_carpetX
from ..utils import read_ndata
def plotimage(img,out_file):
fig = plt.figure(constrained_layout=False, figsize=(30, 15))
from nilearn.plotting import plot_anat
plot_anat(img,draw_cross=False,figure=fig)
fig.savefig(out_file,bbox_inches="tight", pad_inches=None)
return out_file
def plot_svg(fdata,fd,dvars,filename,tr=1):
'''
plot carpetplot with fd and dvars
------------
fdata:
4D ndarray
fd:
framewise displacement
dvars:
dvars
    filename:
        output filename for the figure
    tr:
        repetition time
'''
fig = plt.figure(constrained_layout=False, figsize=(30, 15))
grid = mgs.GridSpec(3, 1, wspace=0.0, hspace=0.05,
height_ratios=[1] * (3 - 1) + [5])
confoundplot(fd, grid[0], tr=tr, color='b', name='FD')
confoundplot(dvars, grid[1], tr=tr, color='r', name='DVARS')
plot_carpet(func_data=fdata,subplot=grid[-1], tr=tr,)
fig.savefig(filename,bbox_inches="tight", pad_inches=None)
def compute_dvars(datat):
'''
compute standard dvars
    datat : numpy ndarray
        data matrix, vertices/voxels by timepoints
'''
firstcolumn=np.zeros((datat.shape[0]))[...,None]
datax=np.hstack((firstcolumn,np.diff(datat)))
datax_ss=np.sum(np.square(datax),axis=0)/datat.shape[0]
return np.sqrt(datax_ss)
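
# Worked example (illustrative): for datat = np.array([[1, 3], [2, 2]])
# (2 voxels, 2 timepoints) the backward differences are [[0, 2], [0, 0]], the
# per-frame mean of the squared differences is [0, 2], so compute_dvars
# returns [0., sqrt(2)].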
def plot_carpet(func_data,detrend=True, nskip=0, size=(950, 800),
subplot=None, title=None, output_file=None, legend=False,
tr=None):
"""
    Plot an image representation of voxel intensities across time also known
as the "carpet plot"
from Niworkflows
Parameters
----------
func_data :
4D ndarray
detrend : boolean, optional
Detrend and standardize the data prior to plotting.
nskip : int
Number of volumes at the beginning of the scan marked to be excluded.
title : string, optional
The title displayed on the figure.
output_file : string, or None, optional
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
legend : bool
Whether to render the average functional series with ``atlaslabels`` as
overlay.
tr : float , optional
Specify the TR, if specified it uses this value. If left as None,
# Frames is plotted instead of time.
"""
# Define TR and number of frames
notr = False
if tr is None:
notr = True
tr = 1
ntsteps = func_data.shape[-1]
data = func_data.reshape(-1, ntsteps)
p_dec = 1 + data.shape[0] // size[0]
if p_dec:
data = data[::p_dec, :]
t_dec = 1 + data.shape[1] // size[1]
if t_dec:
data = data[:, ::t_dec]
# Detrend data
v = (None, None)
if detrend:
data = clean(data.T, t_r=tr).T
v = (-2, 2)
# If subplot is not defined
if subplot is None:
subplot = mgs.GridSpec(1, 1)[0]
# Define nested GridSpec
wratios = [1, 100, 20]
gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
width_ratios=wratios[:2 + int(legend)],
wspace=0.0)
# Carpet plot
ax1 = plt.subplot(gs[1])
ax1.imshow(data, interpolation='nearest', aspect='auto', cmap='gray',
vmin=v[0], vmax=v[1])
ax1.grid(False)
ax1.set_yticks([])
ax1.set_yticklabels([])
# Set 10 frame markers in X axis
interval = max((int(data.shape[-1] + 1) //
10, int(data.shape[-1] + 1) // 5, 1))
xticks = list(range(0, data.shape[-1])[::interval])
ax1.set_xticks(xticks)
if notr:
ax1.set_xlabel('time (frame #)')
else:
ax1.set_xlabel('time (s)')
labels = tr * (np.array(xticks)) * t_dec
ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=10)
# Remove and redefine spines
for side in ["top", "right"]:
ax1.spines[side].set_color('none')
ax1.spines[side].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_color('none')
ax1.spines["left"].set_visible(False)
if output_file is not None:
figure = plt.gcf()
figure.savefig(output_file, bbox_inches='tight')
plt.close(figure)
figure = None
return output_file
return [ax1], gs
def confoundplot(tseries, gs_ts, gs_dist=None, name=None,
units=None, tr=None, hide_x=True, color='b', nskip=0,
cutoff=None, ylims=None):
'''
adapted from niworkflows
tseries:
numpy array
gs_ts:
GridSpec
name:
file name
units:
tseries unit
tr:
repetition time
'''
# Define TR and number of frames
notr = False
if tr is None:
notr = True
tr = 1.
ntsteps = len(tseries)
tseries = np.array(tseries)
# Define nested GridSpec
gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts,
width_ratios=[1, 100], wspace=0.0)
ax_ts = plt.subplot(gs[1])
ax_ts.grid(False)
# Set 10 frame markers in X axis
interval = max((ntsteps // 10, ntsteps // 5, 1))
xticks = list(range(0, ntsteps)[::interval])
ax_ts.set_xticks(xticks)
if not hide_x:
if notr:
ax_ts.set_xlabel('time (frame #)')
else:
ax_ts.set_xlabel('time (s)')
labels = tr * np.array(xticks)
ax_ts.set_xticklabels(['%.02f' % t for t in labels.tolist()])
else:
ax_ts.set_xticklabels([])
if name is not None:
if units is not None:
name += ' [%s]' % units
ax_ts.annotate(
name, xy=(0.0, 0.7), xytext=(0, 0), xycoords='axes fraction',
textcoords='offset points', va='center', ha='left',
color=color, size=20,
bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none',
'color': 'none', 'lw': 0, 'alpha': 0.8})
for side in ["top", "right"]:
ax_ts.spines[side].set_color('none')
ax_ts.spines[side].set_visible(False)
if not hide_x:
ax_ts.spines["bottom"].set_position(('outward', 20))
ax_ts.xaxis.set_ticks_position('bottom')
else:
ax_ts.spines["bottom"].set_color('none')
ax_ts.spines["bottom"].set_visible(False)
# ax_ts.spines["left"].set_position(('outward', 30))
ax_ts.spines["left"].set_color('none')
ax_ts.spines["left"].set_visible(False)
# ax_ts.yaxis.set_ticks_position('left')
ax_ts.set_yticks([])
ax_ts.set_yticklabels([])
nonnan = tseries[~np.isnan(tseries)]
if nonnan.size > 0:
# Calculate Y limits
valrange = (nonnan.max() - nonnan.min())
def_ylims = [nonnan.min() - 0.1 * valrange,
nonnan.max() + 0.1 * valrange]
if ylims is not None:
if ylims[0] is not None:
def_ylims[0] = min([def_ylims[0], ylims[0]])
if ylims[1] is not None:
def_ylims[1] = max([def_ylims[1], ylims[1]])
# Add space for plot title and mean/SD annotation
def_ylims[0] -= 0.1 * (def_ylims[1] - def_ylims[0])
ax_ts.set_ylim(def_ylims)
# Annotate stats
maxv = nonnan.max()
mean = nonnan.mean()
stdv = nonnan.std()
p95 = np.percentile(nonnan, 95.0)
else:
maxv = 0
mean = 0
stdv = 0
p95 = 0
stats_label = (r'max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} '
r'$\bullet$ $\sigma$: {sigma:.3f}').format(
max=maxv, mean=mean, units=units or '', sigma=stdv)
ax_ts.annotate(
stats_label, xy=(0.98, 0.7), xycoords='axes fraction',
xytext=(0, 0), textcoords='offset points',
va='center', ha='right', color=color, size=10,
bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none', 'color': 'none',
'lw': 0, 'alpha': 0.8}
)
# Annotate percentile 95
ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=.1, color='lightgray')
ax_ts.annotate(
'%.2f' % p95, xy=(0, p95), xytext=(-1, 0),
textcoords='offset points', va='center', ha='right',
color='lightgray', size=3)
if cutoff is None:
cutoff = []
for thr in enumerate(cutoff):
ax_ts.plot((0, ntsteps - 1), [thr] * 2,
linewidth=.2, color='dimgray')
ax_ts.annotate(
'%.2f' % thr, xy=(0, thr), xytext=(-1, 0),
textcoords='offset points', va='center', ha='right',
color='dimgray', size=3)
ax_ts.plot(tseries, color=color, linewidth=1.5)
ax_ts.set_xlim((0, ntsteps - 1))
if gs_dist is not None:
ax_dist = plt.subplot(gs_dist)
sns.distplot(tseries, vertical=True, ax=ax_dist)
ax_dist.set_xlabel('Timesteps')
ax_dist.set_ylim(ax_ts.get_ylim())
ax_dist.set_yticklabels([])
return [ax_ts, ax_dist], gs
return ax_ts, gs
# for executive summmary report
# Azeez Adebimpe, 2021
def plotseries(conf,gs_ts,ylim=None,ylabelx=None,hide_x=None,tr=None,ax=None):
colums =conf.columns
notr = False
if tr is None:
notr = True
tr = 1.
xtick = np.linspace(0,conf.shape[0]*tr,num=conf.shape[0])
plt.style.use('seaborn-white')
plt.xticks(color='k')
plt.yticks(color='k')
gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts,
width_ratios=[1, 100], wspace=0.0)
ax= plt.subplot(gs[1])
ax.grid(False)
for k in colums:
ax.plot(xtick,conf[k],label=k,linewidth=2)
if ylim:
ax.set_ylim(ylim)
else:
ax.set_ylim([-2*conf[k].max(),2*conf[k].max()])
ax.set_ylabel(ylabelx,fontsize=20)
ax.legend(fontsize=20)
last = conf.shape[0] - 1
interval = max((last // 10, last // 5, 1))
ax.set_xlim(0, last)
if not hide_x:
xticks = list(range(0, last)[::interval])
else:
xticks = []
ax.set_xticks(xticks)
if not hide_x:
if tr is None:
ax.set_xlabel("time (frame #)")
else:
ax.set_xlabel("time (s)")
ax.set_xticklabels(["%.01f" % t for t in (tr * np.array(xticks)).tolist()])
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(2)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(20)
return ax
def plot_svgx(rawdata,regdata,resddata,fd,filenamebf,filenameaf,mask=None,seg=None,tr=1):
'''
generate carpet plot with dvars, fd, and WB
------------
rawdata:
nifti or cifti
regdata:
        nifti or cifti after nuisance regression
resddata:
nifti or cifti after regression and filtering
mask:
mask for nifti if available
seg:
3 tissues seg files
tr:
repetition times
fd:
framewise displacement
filenamebf:
output file svg before processing
filenameaf:
output file svg after processing
'''
    rxdata = compute_dvars(read_ndata(datafile=rawdata, maskfile=mask))
    rgdata = compute_dvars(read_ndata(datafile=regdata, maskfile=mask))
    rsdata = compute_dvars(read_ndata(datafile=resddata, maskfile=mask))
conf = pd.DataFrame({'Pre reg': rxdata, 'Post reg': rgdata, 'Post all': rsdata})
fdx = pd.DataFrame({'FD':np.loadtxt(fd)})
rw = read_ndata(datafile=rawdata,maskfile=mask)
rs = read_ndata(datafile=resddata,maskfile=mask)
wbbf = pd.DataFrame({'Mean':np.nanmean(rw,axis=0),'Std':np.nanstd(rw,axis=0)})
wbaf = pd.DataFrame({'Mean':np.nanmean(rs,axis=0),'Std':np.nanstd(rs,axis=0)})
if seg is not None:
atlaslabels = nb.load(seg).get_fdata()
else:
atlaslabels = None
#
plt.cla()
plt.clf()
figx = plt.figure(constrained_layout=True, figsize=(45,60))
grid = mgs.GridSpec(4, 1, wspace=0.0, hspace=0.05,height_ratios=[1,1,2.5,1])
confoundplotx(tseries=conf,gs_ts=grid[0],tr=tr,ylabel='DVARS',hide_x=True)
confoundplotx(tseries=wbbf,gs_ts=grid[1],tr=tr,hide_x=True,ylabel='WB')
plot_carpetX(func=rawdata,atlaslabels=atlaslabels,tr=tr,subplot=grid[2],legend=True,title='Raw')
confoundplotx(tseries=fdx,gs_ts=grid[3],tr=tr,hide_x=False,ylims=[0,1],ylabel='FD[mm]')
figx.savefig(filenamebf,bbox_inches="tight", pad_inches=None,dpi=300)
plt.cla()
plt.clf()
figy = plt.figure(constrained_layout=True, figsize=(45,60))
grid = mgs.GridSpec(4, 1, wspace=0.0, hspace=0.05,height_ratios=[1,1,2.5,1])
confoundplotx(tseries=conf,gs_ts=grid[0],tr=tr,ylabel='DVARS',hide_x=True)
confoundplotx(tseries=wbaf,gs_ts=grid[1],tr=tr,hide_x=True,ylabel='WB')
plot_carpetX(func=resddata,atlaslabels=atlaslabels,tr=tr,subplot=grid[2],legend=True,title='Processed')
confoundplotx(tseries=fdx,gs_ts=grid[3],tr=tr,hide_x=False,ylims=[0,1],ylabel='FD[mm]')
figy.savefig(filenameaf,bbox_inches="tight", pad_inches=None,dpi=300)
return filenamebf,filenameaf
def confoundplotx(
tseries,
gs_ts,
tr=None,
hide_x=True,
ylims=None,
ylabel=None
):
import seaborn as sns
# Define TR and number of frames
notr = False
if tr is None:
notr = True
tr = 1.0
ntsteps = tseries.shape[0]
#tseries = np.array(tseries)
# Define nested GridSpec
gs = mgs.GridSpecFromSubplotSpec(
1, 2, subplot_spec=gs_ts, width_ratios=[1, 100], wspace=0.0
)
ax_ts = plt.subplot(gs[1])
ax_ts.grid(False)
# Set 10 frame markers in X axis
interval = max((ntsteps // 10, ntsteps // 5, 1))
xticks = list(range(0, ntsteps)[::interval])
ax_ts.set_xticks(xticks)
if not hide_x:
if notr:
ax_ts.set_xlabel("Time (frame #)")
else:
ax_ts.set_xlabel("Time (s)")
labels = tr * np.array(xticks)
ax_ts.set_xticklabels(["%.01f" % t for t in labels.tolist()])
else:
ax_ts.set_xticklabels([])
if ylabel:
ax_ts.set_ylabel(ylabel)
columns= tseries.columns
maxim_value =[]
minim_value =[]
for c in columns:
ax_ts.plot(tseries[c],label=c, linewidth=3)
maxim_value.append(max(tseries[c]))
minim_value.append(min(tseries[c]))
minx_value = [abs(x) for x in minim_value]
ax_ts.set_xlim((0, ntsteps - 1))
ax_ts.legend(fontsize=30)
if ylims:
ax_ts.set_ylim(ylims)
else:
ax_ts.set_ylim([-1.5*max(minx_value),1.5*max(maxim_value)])
for item in ([ax_ts.title, ax_ts.xaxis.label, ax_ts.yaxis.label] +
ax_ts.get_xticklabels() + ax_ts.get_yticklabels()):
item.set_fontsize(30)
return ax_ts, gs
|
the-stack_0_9606 | #from YourClassParentDir.YourClass import YourClass
from math import floor
import cv2
import numpy as np
import pandas as pd
#import multiprocessing as mp
from video_process.tracktor import Tracktor
class VideoCapture:
"""
VideoCapture is a class that takes a video, processes it, and returns it.
This means that VideoCapture is responsible for working with the data (Tracktor)
managing the data, adding and removing tracktor objects from the video as well as
    retrieving and exporting data.
    It is also responsible for video-related functions such as play and pause.
Parameters
----------
video_source: string
        This is the path of the video file that is to be processed.
"""
def __init__(self, video_source=""):
# Open the video source
self.cap = cv2.VideoCapture(video_source)
if not self.cap.isOpened():
raise ValueError("Unable to open video source", video_source)
#print(cv2.getBuildInformation())
#print(cv2.ocl.haveOpenCL())
#cv2.ocl.setUseOpenCL(True)
# Get video source width, height (resolution) and video length in frames
self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.length = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
self.FPS = 60
#set the video framerate
self.cap.set(cv2.CAP_PROP_FPS, self.FPS)
#current frame is used to know what frame it is, as well as assigning frames
self.current_frame = 10
self.last_frame = self.current_frame
#playstate is used to play/pause the video
self.play_state = False
#working number is the index for which tracktor to process
self.working_number = 0
self.trackers = []
#a list of tuples with position to track and frame to assign on.
self.track_history = []
#List of gps coordinates, possible triangulation
self.gps_coord = []
#Ground Sample Distance variables
self.cam_distance = 0
self.cam_focal_length = 0
self.cam_sensor_width = 0
self.cam_sensor_height = 0
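        # Illustrative note (not part of the original code): these values are the
        # usual inputs for a ground-sample-distance estimate, e.g.
        #   gsd = (cam_sensor_width * cam_distance) / (cam_focal_length * image_width_px)
        # where image_width_px is the frame width in pixels.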
#the path to export the data
self.output_path = "./output/"
#tracking constants for getting frame types
self.TRACK_ALL = -1
self.NO_TRACKING = -2
#zoom variable for setting focused frame
self.zoom = 1
def draw_gps(self):
"""
Draws the GPS coordinated onto the current frame.
If 1 point, draw a circle
If 2 points, draw a line and calculate distance
If points > 3, draw a polygon and calculate distance of all the edges
"""
pass
def calculate_location(self, pos_x, pos_y):
"""
Calculates GPS location of a point passed in.
Based on the GPS points, the location will calculate distance and direction
to find the location.
"""
pass
def calculate_size(self, tracktor):
"""
Based on calculated distance of the GPS coordinates, size of the object
is calculated.
This should be the distance between the two farthest points (extreme points)
Based on pixel length, calculate the related length.
"""
pass
def create_tracker_pos(self, pos_x, pos_y):
"""
This function creates a new coordinate in history according to current frame
"""
if self.working_number >= 0:
location = (pos_x, pos_y, self.working_number, self.current_frame)
self.track_history.append(location)
print("Adding clicked location to", end="")
print(self.track_history[-1])
def delete_tracker_pos(self, frame_number):
"""
This function removes an assignment on a given frame
"""
def set_tracker_pos(self, tracktor):
"""
This function sets the tracker position at a given frame
"""
for i in range(len(self.track_history)):
#if frame number is equal to set frame ex: (x,y,working_number,frame)
if self.current_frame == self.track_history[i][3]:
tracktor_index = self.find_tracker_index_by_id(tracktor.id)
#if the saved tracktor in the list matches the saved working_number
if tracktor_index == self.track_history[i][2]:
#assign that tracktor's clicked to the saved coordinates(x,y)
self.trackers[tracktor_index].clicked = (self.track_history[i][0],
self.track_history[i][1])
# print("Assigning point from history at:", end="")
# print(self.track_history[i])
def play(self):
"""
Sets the play_state of the video to play, if not already.
"""
if self.play_state is False:
self.play_state = True
def pause(self):
"""
Sets the play_state of the video to pause, if not already.
"""
#pause only if play is set
if self.play_state is True:
print("Pausing")
self.play_state = False
def set_frame(self, value):
"""
Sets the current frame to process to the value passed in.
Parameters
----------
Value: float
Assigns the current_frame
"""
value = floor(float(value))
self.current_frame = value
self.cap.set(cv2.CAP_PROP_POS_FRAMES, value)
def previous_frame(self):
"""
Sets the current frame to process to the previous frame.
"""
self.set_frame(self.current_frame-1)
def next_frame(self):
"""
Sets the current frame to process to the next frame.
"""
self.set_frame(self.current_frame+1)
def add_tracker(self):
"""
Appends a Tracktor object to the trackers list.
"""
self.trackers.append(Tracktor())
def delete_tracker(self, index):
"""
!NOT COMPLETE!
Removes a tracktor object from the trackers list
"""
del self.trackers[index]
#search the list of trackers by name and return -1 if not fouond
def find_tracker_index_by_id(self, name):
"""
Finds the index in trackers where the name matches the tracktor's id.
Parameters
----------
name: string
compared to the tracktor's id
"""
if name == "None":
return self.NO_TRACKING
elif name == "All":
return self.TRACK_ALL
else:
for i in range(len(self.trackers)):
if name == self.trackers[i].id:
return i
return -1
def set_tracker_offset(self, value):
"""
Sets the working_number tracktor's offset to the value passed in.
Offset is the constant subtracted from the mean value within the block
Parameters
----------
value: float
"""
self.trackers[self.working_number].offset = value
def set_tracker_blocksize(self, value):
"""
Sets the working_number tracktor's block_size to the value passed in.
block_size determines the width of the kernel used for adaptive thresholding.
Note: block_size must be odd. This is automatically handled.
Parameters
----------
value: float
"""
if value % 2 == 0:
value += 1
self.trackers[self.working_number].block_size = value
def set_tracker_minarea(self, value):
"""
Sets the working_number tracktor's min_area to the value passed in.
        min_area is the minimum area threshold used to detect the object of interest.
Parameters
----------
value: float
"""
self.trackers[self.working_number].min_area = value
def set_tracker_maxarea(self, value):
"""
Sets the working_number tracktor's max_area to the value passed in.
        max_area is the maximum area threshold used to detect the object of interest.
Parameters
----------
value: float
"""
self.trackers[self.working_number].max_area = value
def set_zoom(self, value):
"""
Sets the zoom to adjust region of interest on a specific tracktor
Parameters:
value: float
The zoom multiplier
"""
self.zoom = float(value)
def get_frame(self, tracking=0):
"""
Returns a processed frame based on what tracking value is passed in
Parameters
----------
tracking: int
determines what to track.
(-2: NONE, -1 ALL, 0...n working_number tracking index)
"""
if self.cap.isOpened():
#initialize ret to false so we enter the while loop
ret = False
            #if we cannot retrieve the frame, continue on to the next one
while ret is False:
if self.play_state is False:
self.set_frame(self.current_frame - 1)
#grab a frame
ret, frame = self.cap.read()
#use openCL on this data when it can.
frame = cv2.UMat(frame)
#set the current frame number to the frame we just received
self.current_frame = self.cap.get(cv2.CAP_PROP_POS_FRAMES)
if tracking == self.NO_TRACKING:
return (True, frame)
elif tracking == self.TRACK_ALL:
ret, final = self.show_all(frame)
else:
ret, final = self.process(frame, self.trackers[tracking])
ret, final = self.get_focused_frame(final, self.trackers[tracking], self.zoom)
if ret:
final = final.get()
                #when we retrieve a new frame, we can assume we updated values with it
return (ret, final)
else:
frame = frame.get()
print("unprocessed")
return(True, frame)
def get_focused_frame(self, frame, tracktor, zoom):
"""
Returns a frame centered and zoomed in on the
individual being tracked.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
tracktor: tracktor object
Contains data and basic functions for tracked individual
        zoom: float
            Zoom factor; the region of interest is centred on the tracked
            point and spans roughly (2 * width / zoom) by (2 * height / zoom)
            pixels, so larger values zoom in further.
"""
try:
frame = frame.get()
#create point from tracked individual
pos_x = int(floor(tracktor.meas_now[0][0]))
pos_y = int(floor(tracktor.meas_now[0][1]))
min_y = int(pos_y - (self.height/zoom))
max_y = int(pos_y + (self.height/zoom))
min_x = int(pos_x -(self.width/zoom))
max_x = pos_x + int(self.width/zoom)
if min_y < 0:
min_y = 0
if min_x < 0:
min_x = 0
if min_y >= 0 and max_y <= self.height and min_x >= 0 and max_x <= self.width:
roi = frame[min_y:max_y,
min_x:max_x]
roi = cv2.UMat(roi)
cv2.imshow("resize", roi)
return (True, roi)
else:
frame = cv2.UMat(frame)
return (True, frame)
#roi = cv2.resize(roi, (int(self.width), int(self.height)))
# # #calculate edges based on points
# min_x = int(pos_x - zoom)
# max_x = int(pos_x + zoom)
# min_y = int(pos_y - zoom)
# max_y = int(pos_y + zoom)
# #keeping aspect ratio solves constant oblongness
# original_aspect = self.width/self.height
# zoomed_aspect = (max_x - min_x)/(max_y - min_y)
# print(zoomed_aspect)
# #difference between ratios needed to change
# adjust_aspect = zoomed_aspect - original_aspect
# #ratio is applied to current height
# adjust_height = (max_y - min_y) * adjust_aspect
# #ratio is applied to current width
# adjust_width = (max_x - min_x) * adjust_aspect
# #when height ratio is off
# if original_aspect > zoomed_aspect:
# #subtract half the ammount needed to meet original aspect
# min_y = int(min_y - (adjust_height/2))
# #add half the ammount needed to meet original aspect
# max_y = int(max_y + (adjust_height/2))
# #when width ratio is off
# elif original_aspect < zoomed_aspect:
# #subtract half the ammount needed to meet original aspect
# min_x = int(min_x - (adjust_width/2))
# #add half the ammount needed to meet original aspect
# max_x = int(max_x + (adjust_width/2))
# NOTE: CAUSE OF DISTORTION, we need the outer edge to stop moving as well
# #limit zoom to video edge
# # region of interest
# roi = frame[min_y:max_y, min_x:max_x]
except:
print("Cannot focus frame")
frame = cv2.UMat(frame)
return (True, frame)
def show_all(self, frame, detail=True):
"""
Returns a frame that shows all of the tracked individuals.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
detail: bool
determines whether or not to display contours,
min_area circle and max_area circle.
"""
#iterate through all
try:
final = frame
ret = True
for i in range(len(self.trackers)):
#accumulate tracker's processes onto final frame
ret, final = self.process(final, self.trackers[i])
if ret is True and detail is False:
cv2.circle(frame, tuple([int(x) for x in self.trackers[i].meas_now[0]]), 5,
self.trackers[i].colour, -1, cv2.LINE_AA)
if detail is True:
return (True, final)
else:
return (True, frame)
except:
print("cannot track more than one individual")
return frame
def process(self, frame, tracktor):
"""
This function takes a frame and a tracked individual and performs operations
on the frame, applying information such as x,y coordinates to the tracktor.
First it applies a threshold, then erodes and dilates to reduce noise.
Before measuring contours, it records the previous coordinates of the tracker.
Second, it applies contours to each clustered individual.
Last, hungarian_algorithm calculates the minimum cost between frames to continue tracking,
and reorder_and_draw then draws the center dot and the min/max area circles.
Parameters
----------
tracktor: Tracktor Object
The object containing all the data to be processed
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
"""
try:
if len(self.track_history) > 0:
self.set_tracker_pos(tracktor)
#eliminate small noise
thresh = tracktor.colour_to_thresh(frame)
# cv2.imshow("thresh", thresh)
thresh = cv2.erode(thresh, tracktor.kernel, iterations=1)
# cv2.imshow("erode", thresh)
thresh = cv2.dilate(thresh, tracktor.kernel, iterations=1)
# cv2.imshow("dilate", thresh)
#x, y coordinates of previous tracktor if meas_now is not empty
if tracktor.meas_now:
pos_x = tracktor.meas_now[0][0]
pos_y = tracktor.meas_now[0][1]
else:
# self.pause()
print("Unable to track " + tracktor.id)
#from our current frame, draw contours and display it on final frame
final, contours = tracktor.detect_and_draw_contours(frame, thresh.get())
# cv2.imshow("detect_and_draw", final)
#detect if the tracker is changed
changed = self.tracker_changed(pos_x, pos_y, contours)
if changed is True:
# self.pause()
print(tracktor.id + " has changed")
row_ind, col_ind = tracktor.hungarian_algorithm()
#try to re-draw, separate try-except block allows redraw of min_area/max_area
final = tracktor.reorder_and_draw(final, col_ind, self.current_frame)
return (True, final)
except:
print("Cannot Process Frame.")
return (False, frame)
def tracker_changed(self, pos_x, pos_y, contours):
"""
NOTE: Function name needs a change.
This function checks if the (pos_x, pos_y) coordinate passed in exists
within the contours that are passed in.
This can either be used to select and assign contours to a tracker,
or check if the tracker has changed from its last position to new contours.
Parameters
----------
pos_x: float
x coordinate on frame
pos_y: float
y coordinate on frame
contours: list
a list of all detected contours that pass the area-based threshold criterion
"""
#assign default flag to True (assume changed until proven not)
changed_tracker_flag = True
#if contours exist (not empty)
if contours:
#we look at all the contours
for contour in contours:
#check if previous position exists in updated contour (1= Yes, -1= No)
dist = cv2.pointPolygonTest(contour, (pos_x, pos_y), False)
# print(dist)
#if previous point exists in the same contour, set changed flag to false
if dist != -1.0:
changed_tracker_flag = False
if changed_tracker_flag is True:
print("changed contours")
return changed_tracker_flag
# if no contours exist, we cannot process anything
else:
print("Unable to track ")
return changed_tracker_flag
def export_all(self):
"""
Iterates through the video collecting the data of each tracktor in trackers the list.
Once data is collected, it exports it in a Pandas dataframe with the frame number,
x and y coordinates.
Each individual exports its own CSV file.
"""
#self.set_frame_pos(1)
#print("setting fame to start:" + str(self.current_frame))
#sets the process to process ALL
self.working_number = self.find_tracker_index_by_id("ALL")
ret = True
#we want to process as fast as we can(1000 fps should be good)
self.cap.set(cv2.CAP_PROP_FPS, 1000)
self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame)
#we want playstate to be true so get_frame will work
self.play_state = True
#reset all tracktor's data
for i in range(len(self.trackers)):
self.trackers[i].df = []
# while self.current_frame < self.length:
while self.current_frame < 1030:
# Get a frame from the video source, already processed
ret, frame = self.get_frame(self.working_number)
print("loading: " + str(int(self.current_frame)) + " of "+ str(int(self.length)))
#frame already processed, retrieve data from that frame and store it in each tracker
for i in range(len(self.trackers)):
#ignore duplicate frame
if len(self.trackers[i].df) > 1:
last_frame = self.trackers[i].df[-1][0]
#it is the first frame and we can simulate the previous_frame
else:
last_frame = self.current_frame-1
#try to append data
try:
#if we have a new frame, append it
if self.current_frame != last_frame:
self.trackers[i].df.append([self.current_frame,
self.trackers[i].meas_now[0][0],#store X coord
self.trackers[i].meas_now[0][1] #store Y coord
])
#we received bad data and cannot process it. return -1
except:
print("Could not get location from " + self.trackers[i].id +
" at frame " + str(self.current_frame)
)
self.trackers[i].df.append([self.current_frame, -1, -1])
self.cap.set(cv2.CAP_PROP_FPS, self.FPS)
print("Starting to export....")
#once done processing the video (last frame complete), export to file
for i in range(len(self.trackers)):
print("Exporting: " + self.trackers[i].id)
#load our data into a pandas dataframe
self.trackers[i].df = pd.DataFrame(np.matrix(self.trackers[i].df),
columns=['frame', 'pos_x', 'pos_y'])
#export the data into a csv file
self.trackers[i].df.to_csv(self.output_path + "csv/" + self.trackers[i].id + ".csv")
# Release the video source when the object is destroyed
def __del__(self):
if self.cap.isOpened():
self.cap.release()
|
the-stack_0_9608 | # This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Helper method to create a API generator based on the current configuration
in the environment valriables.
"""
from contextlib import contextmanager
from typing import Dict, Optional
from flowserv.config import Config
from flowserv.service.api import API, APIFactory
from flowserv.service.local import LocalAPIFactory
import flowserv.config as config
# -- API factory pattern for client applications ------------------------------
def ClientAPI(
env: Optional[Dict] = None, basedir: Optional[str] = None,
database: Optional[str] = None, open_access: Optional[bool] = None,
run_async: Optional[bool] = None, user_id: Optional[str] = None
) -> APIFactory:
"""Create an instance of the API factory that is responsible for generating
API instances for a flowserv client.
The main distinction here is whether a connection is made to a local instance
of the service or to a remote instance. This distinction is made based on
the value of the FLOWSERV_CLIENT environment variable that takes the values
'local' or 'remote'. The default is 'local'.
Provides the option to alter the default settings of environment variables.
Parameters
----------
env: dict, default=None
Dictionary with configuration parameter values.
basedir: string, default=None
Base directory for all workflow files. If no directory is given or
specified in the environment a temporary directory will be created.
database: string, default=None
Optional database connect url.
open_access: bool, default=None
Use an open access policy if set to True.
run_async: bool, default=None
Run workflows in asynchronous mode.
user_id: string, default=None
Optional identifier for the authenticated API user.
Returns
-------
flowserv.service.api.APIFactory
"""
# Get the base configuration settings from the environment if not given.
env = env if env is not None else config.env()
if not isinstance(env, Config):
env = Config(env)
# Update configuration based on the given optional arguments.
if basedir is not None:
env.basedir(basedir)
if database is not None:
env.database(database)
if open_access is not None and open_access:
env.open_access()
# By default, the client runs all workflows synchronously.
if run_async is not None and run_async:
env.run_async()
elif env.get(config.FLOWSERV_ASYNC) is None:
env.run_sync()
# Create local or remote API factory depending on the FLOWSERV_CLIENT value.
client = env.get(config.FLOWSERV_CLIENT, config.LOCAL_CLIENT)
if client == config.LOCAL_CLIENT:
return LocalAPIFactory(env=env, user_id=user_id)
elif client == config.REMOTE_CLIENT:
# Not implemented yet.
pass
raise ValueError("inalid client type '{}'".format(client))
@contextmanager
def service() -> API:
"""Context manager that returns a service API that was instantiated from the
current configuration settings in the environment.
Returns
-------
flowserv.service.api.API
"""
# Create the API factory from the current environment settings.
factory = ClientAPI()
with factory() as api:
yield api
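# Illustrative usage sketch (not part of the original module): acquires a
# service API handle from the environment-derived configuration. Only the
# context manager defined above is exercised; no specific API methods are
# assumed here.
if __name__ == '__main__':
    with service() as api:
        print(type(api).__name__)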
|
the-stack_0_9611 | from zope.security.interfaces import Unauthorized
from zope.testbrowser.browser import LinkNotFoundError
import pytest
EVENT_VIEW_CONFIGURATION_ADD_TEXT = 'event view configuration'
def test_masterdata__Table__1(address_book, browser):
"""It allows to navigate to the event views list."""
browser.login('cal-visitor')
browser.open(browser.CALENDAR_MASTERDATA_URL)
browser.getLink('Event views').click()
assert browser.url == browser.CALENDAR_MASTERDATA_EVENTVIEW_URL
def test_masterdata__Table__2(address_book, browser):
"""It renders a message if there are no event view configurations yet."""
browser.login('cal-visitor')
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
assert 'No event views defined yet.' in browser.contents
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Table__3(address_book, browser, login):
"""It renders no add link for any calendar user."""
browser.login(login)
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
with pytest.raises(LinkNotFoundError):
browser.getLink(EVENT_VIEW_CONFIGURATION_ADD_TEXT)
def test_masterdata__Table__4(address_book, browser):
"""It prevents access for anonymous."""
browser.handleErrors = False # needed to catch exception
with pytest.raises(Unauthorized):
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
def test_masterdata__Add__1(address_book, browser):
"""It allows administrators to add a new category in the list."""
browser.login('mgr')
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
browser.getLink(EVENT_VIEW_CONFIGURATION_ADD_TEXT).click()
assert browser.CALENDAR_EVENTVIEW_CONFIGURATION_ADD_URL == browser.url
browser.getControl('title').value = 'default'
browser.getControl('Add').click()
assert '"default" added.' == browser.message
# The new configuration shows up in the list:
assert '>default<' in browser.contents
def test_masterdata__Add__2(
address_book, EventViewConfigurationFactory, browser):
"""It prevents adding a new config with an already existing title."""
EventViewConfigurationFactory(address_book, u'default')
browser.login('mgr')
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_ADD_URL)
browser.getControl('title').value = 'default'
browser.getControl('Add').click()
assert 'There were some errors.' in browser.contents
assert 'This title is already used for an ' in browser.contents
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Add__3(address_book, browser, login):
"""It is not accessible for any calendar user."""
browser.login(login)
browser.assert_forbidden(browser.CALENDAR_EVENTVIEW_CONFIGURATION_ADD_URL)
def test_masterdata__Edit__1(
address_book, EventViewConfigurationFactory, CategoryFactory, browser):
"""It allows to edit a category."""
EventViewConfigurationFactory(address_book, u'default')
CategoryFactory(address_book, u'foo')
CategoryFactory(address_book, u'bar')
browser.login('mgr')
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
browser.getLink('default').click()
assert browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL == browser.url
assert 'default' == browser.getControl('title').value
browser.getControl('title').value = 'alternative'
browser.getControl('start date').displayValue = ['3 days in past']
browser.getControl('duration').displayValue = ['3 weeks']
browser.getControl('categories').displayValue = ['bar']
browser.getControl('show fields').displayValue = ['persons']
browser.getControl('Save').click()
assert 'Data successfully updated.' == browser.message
# The changed category name shows up in the list:
assert 'alternative' in browser.contents
browser.getLink('alternative').click()
assert browser.getControl('title').value == 'alternative'
assert browser.getControl('start date').displayValue == ['3 days in past']
assert browser.getControl('duration').displayValue == ['3 weeks']
assert browser.getControl('categories').displayValue == ['bar']
assert browser.getControl('show fields').displayValue == ['persons']
def test_masterdata__Edit__2(
address_book, EventViewConfigurationFactory, browser):
"""It prevents changing a category title to an existing one."""
EventViewConfigurationFactory(address_book, u'default')
EventViewConfigurationFactory(address_book, u'alternative')
browser.login('mgr')
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL)
browser.getControl('title').value = 'alternative'
browser.getControl('Save').click()
assert 'There were some errors.' in browser.contents
assert 'This title is already used for an ' in browser.contents
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Edit__3(
address_book, EventViewConfigurationFactory, browser, login):
"""It allows calendar users only to see the event view configuration data.
But they cannot change or delete them.
"""
EventViewConfigurationFactory(address_book, u'foo')
browser.login(login)
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL)
# There are no fields and no delete button:
assert (['form.buttons.apply', 'form.buttons.cancel'] ==
browser.all_control_names)
def test_masterdata__Delete__1(
address_book, EventViewConfigurationFactory, browser):
"""It allows to delete an event view configuration."""
EventViewConfigurationFactory(address_book, u'default')
browser.login('mgr')
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL)
browser.getControl('Delete').click()
assert browser.CALENDAR_EVENTVIEW_CONFIGURATION_DELETE_URL == browser.url
assert ('Do you really want to delete this event view configuration?' in
browser.contents)
browser.getControl('Yes').click()
assert '"default" deleted.' == browser.message
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Delete__2(
address_book, EventViewConfigurationFactory, browser, login):
"""It is not accessible for any calendar user."""
EventViewConfigurationFactory(address_book, u'foo')
browser.login(login)
browser.assert_forbidden(
browser.CALENDAR_EVENTVIEW_CONFIGURATION_DELETE_URL)
|
the-stack_0_9612 | from setuptools import setup, find_packages
PACKAGE_NAME = "lintreview"
VERSION = "0.14.0"
requirements = open('./requirements.txt', 'r')
setup(
name=PACKAGE_NAME,
version=VERSION,
description="""
Lint Review, an automated code review tool that integrates with github.
Integrates with the github API & a variety of code checking tools.
""",
author="Mark story",
author_email="[email protected]",
packages=find_packages(),
entry_points={
'console_scripts': [
'lintreview = lintreview.cli:main',
],
},
install_requires=requirements.readlines(),
)
|
the-stack_0_9613 | from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class DialaphoneSpider(BaseSpider):
name = 'dialaphone.co.uk'
allowed_domains = ['dialaphone.co.uk']
start_urls = ['http://www.dialaphone.co.uk/pay-as-you-go/']
def parse(self, response):
hxs = HtmlXPathSelector(response)
urls = hxs.select('//*[@id="ulManufacturerLinks"]/li/a/@href').extract()
for url in urls:
yield Request(url, callback=self.parse_categories)
def parse_categories(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//table[@class="List"]/tr')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'td[@class="DealIncludes"]/a[@class="PhoneName"]/text()')
loader.add_xpath('url', 'td[@class="DealIncludes"]/a[@class="PhoneName"]/@href')
price = 0.0
if product.select('td[@class="Price"]/text()'):
price = product.select('td[@class="Price"]/text()').extract()[0]
loader.add_value('price', price)
yield loader.load_item()
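# Usage note (not part of the original spider): inside a Scrapy project this
# spider would typically be started from the command line, for example:
#   scrapy crawl dialaphone.co.uk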
|
the-stack_0_9614 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.md') as history_file:
history = history_file.read()
install_requires = [
'torch<2,>=1.0',
'torchvision<1,>=0.4.2',
'scikit-learn<0.23,>=0.21',
'numpy<2,>=1.17.4',
'pandas<0.26,>=0.24',
]
setup_requires = [
'pytest-runner>=2.11.1',
]
tests_require = [
'pytest>=3.4.2',
'pytest-cov>=2.6.0',
]
development_requires = [
# general
'bumpversion>=0.5.3',
'pip>=9.0.1',
'watchdog>=0.8.3',
# docs
'm2r>=0.2.0',
'Sphinx>=1.7.1',
'sphinx_rtd_theme>=0.2.4',
'autodocsumm>=0.1.10',
# style check
'flake8>=3.7.7',
'isort>=4.3.4',
# fix style issues
'autoflake>=1.2',
'autopep8>=1.4.3',
# distribute on PyPI
'twine>=1.10.0',
'wheel>=0.30.0',
# Advanced testing
'coverage>=4.5.1',
'tox>=2.9.1',
]
setup(
author='MIT Data To AI Lab',
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description='Conditional GAN for Tabular Data',
entry_points={
'console_scripts': [
'ctgan=ctgan.__main__:main'
],
},
extras_require={
'test': tests_require,
'dev': development_requires + tests_require,
},
install_package_data=True,
install_requires=install_requires,
license='MIT license',
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
include_package_data=True,
keywords='ctgan CTGAN',
name='ctgan',
packages=find_packages(include=['ctgan', 'ctgan.*']),
python_requires='>=3.5',
setup_requires=setup_requires,
test_suite='tests',
tests_require=tests_require,
url='https://github.com/sbuttler/CTGAN',
version='0.2.2.dev0',
zip_safe=False,
)
|
the-stack_0_9617 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
This is a poor man's port of set_up_volume.sh to allow `image_package` to
emit btrfs loopbacks. In ~1 weeks' time, this will be replaced by a
better-tested, more robust, and more coherent framework for handling images
and loopbacks.
'''
import logging
import os
import subprocess
import sys
import tempfile
from typing import Optional
from .common import byteme, get_file_logger, run_stdout_to_err
from .unshare import Unshare, nsenter_as_root, nsenter_as_user
log = get_file_logger(__file__)
MiB = 2 ** 20
# Otherwise, `mkfs.btrfs` fails with:
# ERROR: minimum size for each btrfs device is 114294784
MIN_CREATE_BYTES = 109 * MiB
# The smallest size, to which btrfs will GROW a tiny filesystem. For
# lower values, `btrfs resize` prints:
# ERROR: unable to resize '_foo/volume': Invalid argument
# MIN_GROW_BYTES = 175 * MiB
#
# When a filesystem's `min-dev-size` is small, `btrfs resize` below this
# limit will fail to shrink with `Invalid argument`.
MIN_SHRINK_BYTES = 256 * MiB
def _round_to_loop_block_size(num_bytes: int, log_level: int) -> int:
'''
Avoid T24578982: btrfs soft lockup: `losetup --set-capacity /dev/loopN`
wrongly sets block size to 1024 when backing file size is 4096-odd.
Future: maybe we shouldn't hardcode 4096, but instead query:
blockdev --getbsz /dev/loopSOMETHING
'''
block_size = 4096
rounded = num_bytes + (block_size - (num_bytes % block_size)) % block_size
if num_bytes != rounded:
log.log(
log_level,
f'Rounded image size {num_bytes} up to {rounded} to avoid kernel '
'bug.'
)
return rounded
def _create_or_resize_image_file(
path: bytes, at_least_bytes: int, log_level: int=logging.INFO,
):
'''
Be sure to call `btrfs filesystem resize` and `losetup --set-capacity`
in the appropriate order.
'''
rounded_bytes = _round_to_loop_block_size(at_least_bytes, log_level)
run_stdout_to_err([
'truncate', '-s', str(rounded_bytes), path,
], check=True)
def _fix_up_fs_size(size_bytes: int, min_usable_fs_size: int) -> int:
if size_bytes < min_usable_fs_size:
log.warning(
f'btrfs cannot use a size of {size_bytes} < {min_usable_fs_size} '
'bytes, will use the larger size'
)
return min_usable_fs_size
return size_bytes
def _format_image_file(path: bytes, size_bytes: int) -> int:
'Returns the actual filesystem size, which may have been increased.'
size_bytes = _fix_up_fs_size(size_bytes, MIN_CREATE_BYTES)
log.info(f'Formatting btrfs {size_bytes}-byte FS at {path}')
_create_or_resize_image_file(path, size_bytes)
# Note that this can fail with 'cannot check mount status' if the
# host is in a bad state:
# - a file backing a loop device got deleted, or
# - multiple filesystems with the same UUID got mounted as a loop
# device, breaking the metadata for the affected loop device (this
# latter issue is a kernel bug).
# We don't check for this error case since there's nothing we can do to
# remediate it.
run_stdout_to_err(['mkfs.btrfs', path], check=True)
return size_bytes
def _mount_image_file(
unshare: Optional[Unshare], file_path: bytes, mount_path: bytes,
) -> bytes:
log.info(f'Mounting btrfs {file_path} at {mount_path}')
# Explicitly set filesystem type to detect shenanigans.
run_stdout_to_err(nsenter_as_root(
unshare, 'mount', '-t', 'btrfs', '-o', 'loop,discard,nobarrier',
file_path, mount_path,
), check=True)
loop_dev = subprocess.check_output(nsenter_as_user(
unshare, 'findmnt', '--noheadings', '--output', 'SOURCE',
mount_path,
)).rstrip(b'\n')
# This increases the chances that --direct-io=on will succeed, since one
# of the common failure modes is that the loopback's sector size is NOT
# a multiple of the sector size of the underlying device (the devices
# we've seen in production have sector sizes of 512, 1024, or 4096).
if run_stdout_to_err([
'sudo', 'losetup', '--sector-size=4096', loop_dev,
]).returncode != 0:
log.error(
f'Failed to set --sector-size=4096 for {loop_dev}, setting '
'direct IO is more likely to fail.'
)
# This helps perf and avoids doubling our usage of buffer cache.
# Also, when the image is on tmpfs, setting direct IO fails.
if run_stdout_to_err([
'sudo', 'losetup', '--direct-io=on', loop_dev,
]).returncode != 0:
log.error(
f'Could not enable --direct-io for {loop_dev}, expect worse '
'performance.'
)
return loop_dev
def _minimize_image_size(
*, unshare: Optional[Unshare], cur_size: int, image_path: bytes,
mount_path: bytes, loop_dev: bytes,
) -> int:
'Returns the new filesystem size.'
min_size_out = subprocess.check_output(nsenter_as_root(
unshare, 'btrfs', 'inspect-internal', 'min-dev-size', mount_path,
)).split(b' ')
assert min_size_out[1] == b'bytes'
min_size = _fix_up_fs_size(int(min_size_out[0]), MIN_SHRINK_BYTES)
if min_size >= cur_size:
log.info(
f'Nothing to do: the minimum resize limit {min_size} is no less '
f'than the current filesystem size of {cur_size} bytes.'
)
return cur_size  # filesystem size is unchanged
log.info(f'Shrinking {image_path} to the btrfs minimum, {min_size} bytes')
run_stdout_to_err(nsenter_as_root(
unshare, 'btrfs', 'filesystem', 'resize', str(min_size),
mount_path,
), check=True)
fs_bytes = int(subprocess.check_output(nsenter_as_user(
unshare, 'findmnt', '--bytes', '--noheadings', '--output', 'SIZE',
mount_path,
)))
# Log an error on size rounding since this is not expected to need it.
_create_or_resize_image_file(image_path, fs_bytes, log_level=logging.ERROR)
run_stdout_to_err([
'sudo', 'losetup', '--set-capacity', loop_dev,
], check=True)
return min_size
class LoopbackVolume:
def __init__(
self, unshare: Optional[Unshare], image_path: bytes, size_bytes: int,
):
self._unshare = unshare
self._temp_dir_ctx = tempfile.TemporaryDirectory() # noqa: P201
self._size_bytes = size_bytes
self._image_path = byteme(os.path.abspath(image_path))
self._temp_dir: Optional[bytes] = None
self._mount_dir: Optional[bytes] = None
def __enter__(self) -> 'LoopbackVolume':
self._temp_dir = byteme(
os.path.abspath(self._temp_dir_ctx.__enter__())
)
try:
self._size_bytes = _format_image_file(
self._image_path, self._size_bytes
)
self._mount_dir = os.path.join(self._temp_dir, b'volume')
os.mkdir(self._mount_dir)
self._loop_dev = _mount_image_file(
self._unshare, self._image_path, self._mount_dir,
)
except BaseException:
self.__exit__(*sys.exc_info())
raise
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
'This only suppresses exceptions if TemporaryDirectory.__exit__ does.'
if self._mount_dir:
# If this throws, we won't be able to clean up `_mount_dir`, so
# let the error fly. If the loopback is inside an Unshare
# object, the mount itself will eventually get cleaned up, but
# we don't have ownership to trigger Unshare cleanup, and in any
# case, that kind of clean-up is asynchronous, and would be
# tricky to await properly.
#
# NB: It's possible to use tmpfs and namespaces to guarantee
# cleanup, but it's just an empty directory in `/tmp`, so it's
# really not worth the complexity.
self.unmount_if_mounted()
return self._temp_dir_ctx.__exit__(exc_type, exc_val, exc_tb)
def unmount_if_mounted(self):
if self._mount_dir:
# Nothing might have been mounted, ignore exit code
run_stdout_to_err(
nsenter_as_root(self._unshare, 'umount', self._mount_dir),
)
def dir(self) -> bytes:
return self._mount_dir
def minimize_size(self) -> int:
'Returns the new image size.'
self._size_bytes = _minimize_image_size(
unshare=self._unshare,
cur_size=self._size_bytes,
image_path=self._image_path,
mount_path=self._mount_dir,
loop_dev=self._loop_dev,
)
return self._size_bytes
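# Illustrative usage sketch (not part of the original module): creates and
# mounts a small btrfs loopback image without an Unshare namespace. Assumes a
# Linux host with sudo, mkfs.btrfs and losetup available; the image path below
# is an arbitrary example.
if __name__ == '__main__':
    with LoopbackVolume(None, b'/tmp/example_loopback.btrfs', 256 * MiB) as vol:
        print('mounted at', vol.dir())
        print('minimized to', vol.minimize_size(), 'bytes')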
|
the-stack_0_9618 | import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from models.connections.conv_2d_bn_activ import Conv2DBNActiv
from models.connections.resblock import ResBlock
from chainercv.links import PickableSequentialChain
class SERes2Net(PickableSequentialChain):
_blocks = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3]
}
def __init__(self, n_layer,
n_class=None, scale=4,
pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
blocks = self._blocks[n_layer]
self.mean = mean
if initialW is None:
initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
if 'initialW' not in fc_kwargs:
fc_kwargs['initialW'] = initializers.Normal(scale=0.01)
kwargs = {
'scale': scale, 'initialW': initialW, 'stride_first': True,
'add_seblock': True}
super(SERes2Net, self).__init__()
with self.init_scope():
self.conv1 = Conv2DBNActiv(None, 64, 3, 1, 1, nobias=True,
initialW=initialW)
self.res2 = ResBlock(blocks[0], None, 64, 256, 2, **kwargs)
self.res3 = ResBlock(blocks[1], None, 128, 512, 1, **kwargs)
self.res4 = ResBlock(blocks[2], None, 256, 1024, 2, **kwargs)
self.res5 = ResBlock(blocks[3], None, 512, 2048, 1, **kwargs)
self.pool5 = lambda x: F.average(x, axis=(2, 3))
self.fc6 = L.Linear(None, n_class, **fc_kwargs)
class SERes2Net50(SERes2Net):
def __init__(self, n_class=10, scale=4, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SERes2Net50, self).__init__(
50, n_class, scale, pretrained_model,
mean, initialW, fc_kwargs)
class SERes2Net101(SERes2Net):
def __init__(self, n_class=10, scale=4, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SERes2Net101, self).__init__(
101, n_class, scale, pretrained_model,
mean, initialW, fc_kwargs)
class SERes2Net152(SERes2Net):
def __init__(self, n_class=10, scale=4, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SERes2Net152, self).__init__(
152, n_class, scale, pretrained_model,
mean, initialW, fc_kwargs)
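# Illustrative usage sketch (not part of the original module): builds the
# 50-layer variant and runs a dummy forward pass in test mode. The batch size
# and the 32x32 input resolution are arbitrary assumptions for demonstration.
if __name__ == '__main__':
    model = SERes2Net50(n_class=10, scale=4)
    x = np.zeros((2, 3, 32, 32), dtype=np.float32)
    with chainer.using_config('train', False):
        y = model(x)
    print(y.shape)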
|
the-stack_0_9619 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/14 16:30
Desc: SW (Shenwan) industry indexes - SW level 1, level 2 and level 3
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
SW market-representation indexes - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: real-time quotes of the SW market-representation indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["ๆๆฐไปฃ็ ", "ๆๆฐๅ็งฐ", "ๆจๆถ็", "ไปๅผ็", "ๆไบค้ข", "ๆ้ซไปท", "ๆไฝไปท", "ๆๆฐไปท", "ๆไบค้"]
temp_df["ๆจๆถ็"] = pd.to_numeric(temp_df["ๆจๆถ็"])
temp_df["ไปๅผ็"] = pd.to_numeric(temp_df["ไปๅผ็"])
temp_df["ๆไบค้ข"] = pd.to_numeric(temp_df["ๆไบค้ข"])
temp_df["ๆ้ซไปท"] = pd.to_numeric(temp_df["ๆ้ซไปท"])
temp_df["ๆไฝไปท"] = pd.to_numeric(temp_df["ๆไฝไปท"])
temp_df["ๆๆฐไปท"] = pd.to_numeric(temp_df["ๆๆฐไปท"])
temp_df["ๆไบค้"] = pd.to_numeric(temp_df["ๆไบค้"])
return temp_df
def sw_index_spot() -> pd.DataFrame:
"""
SW level-1 industries - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: real-time quotes of SW level-1 industry indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["ๆๆฐไปฃ็ ", "ๆๆฐๅ็งฐ", "ๆจๆถ็", "ไปๅผ็", "ๆไบค้ข", "ๆ้ซไปท", "ๆไฝไปท", "ๆๆฐไปท", "ๆไบค้"]
temp_df["ๆจๆถ็"] = pd.to_numeric(temp_df["ๆจๆถ็"])
temp_df["ไปๅผ็"] = pd.to_numeric(temp_df["ไปๅผ็"])
temp_df["ๆไบค้ข"] = pd.to_numeric(temp_df["ๆไบค้ข"])
temp_df["ๆ้ซไปท"] = pd.to_numeric(temp_df["ๆ้ซไปท"])
temp_df["ๆไฝไปท"] = pd.to_numeric(temp_df["ๆไฝไปท"])
temp_df["ๆๆฐไปท"] = pd.to_numeric(temp_df["ๆๆฐไปท"])
temp_df["ๆไบค้"] = pd.to_numeric(temp_df["ๆไบค้"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
SW level-2 industries - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: real-time quotes of SW level-2 industry indexes
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["ๆๆฐไปฃ็ ", "ๆๆฐๅ็งฐ", "ๆจๆถ็", "ไปๅผ็", "ๆไบค้ข", "ๆ้ซไปท", "ๆไฝไปท", "ๆๆฐไปท", "ๆไบค้"]
temp_df["ๆจๆถ็"] = pd.to_numeric(temp_df["ๆจๆถ็"])
temp_df["ไปๅผ็"] = pd.to_numeric(temp_df["ไปๅผ็"])
temp_df["ๆไบค้ข"] = pd.to_numeric(temp_df["ๆไบค้ข"])
temp_df["ๆ้ซไปท"] = pd.to_numeric(temp_df["ๆ้ซไปท"])
temp_df["ๆไฝไปท"] = pd.to_numeric(temp_df["ๆไฝไปท"])
temp_df["ๆๆฐไปท"] = pd.to_numeric(temp_df["ๆๆฐไปท"])
temp_df["ๆไบค้"] = pd.to_numeric(temp_df["ๆไบค้"])
return temp_df
def sw_index_cons(symbol: str = "801011") -> pd.DataFrame:
"""
SW index constituents - both level-1 and level-2 industry indexes can be queried
http://www.swsindex.com/idx0210.aspx?swindexcode=801010
:param symbol: index code
:type symbol: str
:return: SW index constituent information
:rtype: pandas.DataFrame
"""
url = f"http://www.swsindex.com/downfile.aspx?code={symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 4:
stock_code = cols[0].text
stock_name = cols[1].text
weight = cols[2].text
start_date = cols[3].text
data.append(
{
"stock_code": stock_code,
"stock_name": stock_name,
"start_date": start_date,
"weight": weight,
}
)
temp_df = pd.DataFrame(data)
temp_df["start_date"] = pd.to_datetime(temp_df["start_date"]).dt.date
temp_df["weight"] = pd.to_numeric(temp_df["weight"])
return temp_df
def sw_index_daily(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
) -> pd.DataFrame:
"""
Daily quotes of SW level-1 and level-2 industry indexes
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: SW index code
:type symbol: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:return: daily quotes of the SW index
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel2.aspx"
params = {
"ctable": "swindexhistory",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 10:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
open_ = cols[3].text
high = cols[4].text
low = cols[5].text
close = cols[6].text
vol = cols[7].text
amount = cols[8].text
change_pct = cols[9].text
data.append(
{
"index_code": symbol.replace(",", ""),
"index_name": index_name.replace(",", ""),
"date": date.replace(",", ""),
"open": open_.replace(",", ""),
"high": high.replace(",", ""),
"low": low.replace(",", ""),
"close": close.replace(",", ""),
"vol": vol.replace(",", ""),
"amount": amount.replace(",", ""),
"change_pct": change_pct.replace(",", ""),
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["vol"] = pd.to_numeric(temp_df["vol"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df["change_pct"] = pd.to_numeric(temp_df["change_pct"])
return temp_df
def sw_index_daily_indicator(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
) -> pd.DataFrame:
"""
Historical market indicators of SW level-1 and level-2 industries
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: SW index code
:type symbol: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param data_type: choice of {"Day": daily report, "Week": weekly report}
:type data_type: str
:return: SW index data at the given frequency
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel.aspx"
params = {
"ctable": "V_Report",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}' and type='{data_type}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 14:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
close = cols[3].text
volume = cols[4].text
chg_pct = cols[5].text
turn_rate = cols[6].text
pe = cols[7].text
pb = cols[8].text
v_wap = cols[9].text
turnover_pct = cols[10].text
float_mv = cols[11].text
avg_float_mv = cols[12].text
dividend_yield_ratio = cols[13].text
data.append(
{
"index_code": symbol,
"index_name": index_name,
"date": date,
"close": close,
"volume": volume,
"chg_pct": chg_pct,
"turn_rate": turn_rate,
"pe": pe,
"pb": pb,
"vwap": v_wap,
"float_mv": float_mv,
"avg_float_mv": avg_float_mv,
"dividend_yield_ratio": dividend_yield_ratio,
"turnover_pct": turnover_pct,
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", ""))
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"])
temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"])
temp_df["pe"] = pd.to_numeric(temp_df["pe"])
temp_df["pb"] = pd.to_numeric(temp_df["pb"])
temp_df["vwap"] = pd.to_numeric(temp_df["vwap"])
temp_df["float_mv"] = temp_df["float_mv"].apply(lambda x: x.replace(",", ""))
temp_df["float_mv"] = pd.to_numeric(
temp_df["float_mv"],
)
temp_df["avg_float_mv"] = temp_df["avg_float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["avg_float_mv"] = pd.to_numeric(temp_df["avg_float_mv"])
temp_df["dividend_yield_ratio"] = pd.to_numeric(temp_df["dividend_yield_ratio"])
temp_df["turnover_pct"] = pd.to_numeric(temp_df["turnover_pct"])
return temp_df
def sw_index_third_info() -> pd.DataFrame:
"""
legulegu - SW level-3 industries - classification overview
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: classification overview of SW level-3 industries
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"่กไธไปฃ็ ",
"่กไธๅ็งฐ",
"ๆไปฝไธชๆฐ",
"้ๆๅธ็็",
"TTM(ๆปๅจ)ๅธ็็",
"ๅธๅ็",
"้ๆ่กๆฏ็",
]
temp_df["ๆไปฝไธชๆฐ"] = pd.to_numeric(temp_df["ๆไปฝไธชๆฐ"])
temp_df["้ๆๅธ็็"] = pd.to_numeric(temp_df["้ๆๅธ็็"])
temp_df["TTM(ๆปๅจ)ๅธ็็"] = pd.to_numeric(temp_df["TTM(ๆปๅจ)ๅธ็็"])
temp_df["ๅธๅ็"] = pd.to_numeric(temp_df["ๅธๅ็"])
temp_df["้ๆ่กๆฏ็"] = pd.to_numeric(temp_df["้ๆ่กๆฏ็"])
return temp_df
def sw_index_third_cons(symbol: str = "851921.SI") -> pd.DataFrame:
"""
legulegu - SW level-3 industries - industry constituents
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
:param symbol: level-3 industry code
:type symbol: str
:return: industry constituents
:rtype: pandas.DataFrame
"""
url = f"https://legulegu.com/stockdata/index-composition?industryCode={symbol}"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"ๅบๅท",
"่ก็ฅจไปฃ็ ",
"่ก็ฅจ็ฎ็งฐ",
"็บณๅ
ฅๆถ้ด",
"็ณไธ1็บง",
"็ณไธ2็บง",
"็ณไธ3็บง",
"ไปทๆ ผ",
"ๅธ็็",
"ๅธ็็ttm",
"ๅธๅ็",
"่กๆฏ็",
"ๅธๅผ",
]
temp_df["ไปทๆ ผ"] = pd.to_numeric(temp_df["ไปทๆ ผ"], errors="coerce")
temp_df["ๅธ็็"] = pd.to_numeric(temp_df["ๅธ็็"], errors="coerce")
temp_df["ๅธ็็ttm"] = pd.to_numeric(temp_df["ๅธ็็ttm"], errors="coerce")
temp_df["ๅธๅ็"] = pd.to_numeric(temp_df["ๅธๅ็"], errors="coerce")
temp_df["่กๆฏ็"] = pd.to_numeric(temp_df["่กๆฏ็"].str.strip("%"), errors="coerce")
temp_df["ๅธๅผ"] = pd.to_numeric(temp_df["ๅธๅผ"], errors="coerce")
return temp_df
if __name__ == "__main__":
sw_index_representation_spot_df = sw_index_representation_spot()
print(sw_index_representation_spot_df)
sw_index_spot_df = sw_index_spot()
print(sw_index_spot_df)
sw_index_second_spot_df = sw_index_second_spot()
print(sw_index_second_spot_df)
sw_index_cons_df = sw_index_cons(symbol="801193")
print(sw_index_cons_df)
sw_index_daily_df = sw_index_daily(
symbol="801733", start_date="20001201", end_date="20211207"
)
print(sw_index_daily_df)
sw_index_daily_indicator_df = sw_index_daily_indicator(
symbol="801003",
start_date="20191101",
end_date="20191207",
data_type="Week",
)
print(sw_index_daily_indicator_df)
sw_index_third_info_df = sw_index_third_info()
print(sw_index_third_info_df)
sw_index_third_cons_df = sw_index_third_cons(symbol="851921.SI")
print(sw_index_third_cons_df)
|
the-stack_0_9620 | import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
from torchvision import transforms
import torch
# read data
import os
import numpy as np
from PIL import Image
# utils
TYPE = ['VA_Set', 'EXPR_Set', 'AU_Set']
CLASS = [2, 1, 12]
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
READERS = {
'VA_Set': lambda path: np.genfromtxt(path, dtype=np.single, delimiter=',', skip_header=True),
'EXPR_Set': lambda path: np.genfromtxt(path, dtype=np.int_, skip_header=True),
'AU_Set': lambda path: np.genfromtxt(path, dtype=np.single, delimiter=',', skip_header=True)
}
# datasets
class UnifiedDataset(Dataset):
def __init__(self,
idx: list,
image: np.ndarray,
label: dict,
img_size: int,
mode: str):
# get image
self.idx = idx
self.image = image
self.label = label
# preprocess
if mode == 'Train_Set':
self.preprocess = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.Resize(size=img_size),
transforms.ToTensor(),
transforms.Normalize(
mean=MEAN,
std=STD)
])
else:
self.preprocess = transforms.Compose([
transforms.Resize(size=img_size),
transforms.ToTensor(),
transforms.Normalize(
mean=MEAN,
std=STD)
])
def __getitem__(self, i):
# map the sampler index onto this split's global index before indexing data
idx = self.idx[i]
image = Image.open(self.image[idx])
image = self.preprocess(image)
label = [self.label['VA_Set'][idx],
[self.label['EXPR_Set'][idx]],
self.label['AU_Set'][idx]]
label = np.concatenate(label)
return image, torch.FloatTensor(label)
def __len__(self):
return len(self.idx)
class UnifiedDataModule(pl.LightningDataModule):
def __init__(self, params: dict):
super().__init__()
self.batch_size = params.get('batch_size', 32)
self.img_size = params.get('img_size', 224)
self.num_workers = params.get('num_workers', 4)
self.dataset_dir = params.get('dataset_dir', '../dataset/Aff-Wild/')
with open(os.path.join(self.dataset_dir, 'file.txt')) as f:
self.image = list(map(lambda x: os.path.join(self.dataset_dir, 'cropped_aligned', x.strip()),
f.readlines()))
self.image = np.array(self.image)
self.label = {}
for label_type in TYPE:
self.label[label_type] = READERS[label_type](os.path.join(self.dataset_dir, label_type + '.txt'))
self.index = np.arange(0, len(self.image))
self.train_idx, self.val_idx = train_test_split(self.index, train_size=0.95, random_state=1234)
def setup(self, stage: str = None) -> None:
if stage == 'fit':
self.train_dataset = UnifiedDataset(
self.train_idx,
self.image,
self.label,
self.img_size,
'Train_Set')
self.val_dataset = UnifiedDataset(
self.val_idx,
self.image,
self.label,
self.img_size,
'Validation_Set')
elif stage == 'validate':
self.val_dataset = UnifiedDataset(
self.val_idx,
self.image,
self.label,
self.img_size,
'Validation_Set')
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers)
if __name__ == '__main__':
os.chdir('..')
dm = UnifiedDataModule({'dataset_dir':'../dataset/Aff-Wild/'})
dm.setup('fit')
dataloader = dm.train_dataloader()
print(len(dataloader.dataset))
img, label = next(iter(dataloader))
print(img.shape, label.shape) |
the-stack_0_9621 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
if (sys.version_info > (3,)):
import http.client
from http.client import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
else:
import httplib
from httplib import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
from flask import request, session, make_response
from flask_restful import Resource
from cairis.daemon.CairisHTTPError import ARMHTTPError
from cairis.data.PersonaDAO import PersonaDAO
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.MessageDefinitions import PersonaMessage, PersonaEnvironmentPropertiesMessage, ValueTypeMessage
from cairis.tools.ModelDefinitions import PersonaModel, PersonaEnvironmentPropertiesModel, ValueTypeModel
from cairis.tools.SessionValidator import get_session_id, get_model_generator
__author__ = 'Shamal Faily'
class PersonasAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
constraint_id = request.args.get('constraint_id', -1)
dao = PersonaDAO(session_id)
personas = dao.get_personas(constraint_id=constraint_id)
dao.close()
resp = make_response(json_serialize(personas, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
def post(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
new_persona = dao.from_json(request)
persona_id = dao.add_persona(new_persona)
dao.close()
resp_dict = {'message': 'Persona successfully added', 'persona_id': persona_id}
resp = make_response(json_serialize(resp_dict), OK)
resp.contenttype = 'application/json'
return resp
class PersonaByNameAPI(Resource):
def get(self, name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona = dao.get_persona_by_name(name=name)
dao.close()
resp = make_response(json_serialize(persona, session_id=session_id), OK)
resp.headers['Content-type'] = 'application/json'
return resp
def put(self, name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
req = dao.from_json(request)
dao.update_persona(req, name=name)
dao.close()
resp_dict = {'message': 'Persona successfully updated'}
resp = make_response(json_serialize(resp_dict), OK)
resp.headers['Content-type'] = 'application/json'
return resp
def delete(self, name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
dao.delete_persona(name=name)
dao.close()
resp_dict = {'message': 'Persona successfully deleted'}
resp = make_response(json_serialize(resp_dict), OK)
resp.headers['Content-type'] = 'application/json'
return resp
class PersonaModelByNameAPI(Resource):
def get(self, persona, variable, characteristic):
session_id = get_session_id(session, request)
model_generator = get_model_generator()
dao = PersonaDAO(session_id)
if variable == 'All': variable = ''
if characteristic == 'All': characteristic = ''
dot_code = dao.get_persona_model(persona,variable,characteristic)
dao.close()
resp = make_response(model_generator.generate(dot_code, model_type='persona', renderer='dot'), OK)
accept_header = request.headers.get('Accept', 'image/svg+xml')
if accept_header.find('text/plain') > -1:
resp.headers['Content-type'] = 'text/plain'
else:
resp.headers['Content-type'] = 'image/svg+xml'
return resp
class PersonaCharacteristicsByNameAPI(Resource):
def get(self, persona, variable, characteristic):
session_id = get_session_id(session, request)
model_generator = get_model_generator()
dao = PersonaDAO(session_id)
if variable == 'All': variable = ''
if characteristic == 'All': characteristic = ''
char_names = dao.get_persona_characteristics(persona,variable,characteristic)
dao.close()
resp = make_response(json_serialize(char_names, session_id=session_id), OK)
resp.headers['Content-type'] = 'application/json'
return resp
class PersonaNamesAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona_names = dao.get_persona_names()
dao.close()
resp = make_response(json_serialize(persona_names, session_id=session_id), OK)
resp.headers['Content-type'] = 'application/json'
return resp
class PersonaTypesAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
pTypes = dao.get_persona_types()
dao.close()
resp = make_response(json_serialize(pTypes, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
class PersonaEnvironmentPropertiesAPI(Resource):
def get(self, persona_name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona_props = dao.get_persona_props(name=persona_name)
dao.close()
resp = make_response(json_serialize(persona_props, session_id=session_id))
resp.contenttype = 'application/json'
return resp
def put(self, persona_name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona_prop = dao.from_json(request, to_props=True)
dao.update_persona_properties(persona_prop, name=persona_name)
dao.close()
resp_dict = {'message': 'The persona properties were successfully updated.'}
resp = make_response(json_serialize(resp_dict), OK)
resp.contenttype = 'application/json'
return resp
class PersonasSummaryAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
objts = dao.get_personas_summary()
dao.close()
resp = make_response(json_serialize(objts, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
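# Illustrative note (not part of the original module): these Resource classes
# are registered against URL routes elsewhere in CAIRIS, e.g. with
# Flask-RESTful's api.add_resource(PersonasAPI, '/api/personas'); the route
# path shown here is an assumption for illustration only.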
|
the-stack_0_9623 | import os
import sys
import shutil
import jsbeautifier
from utils.content import getContent
from utils.formatter import Formatter
from utils.merge import Merge
from utils.classify import Classifier
class Process:
def main(self, file, argvs):
path = sys.path[0]
print("****************************")
print(file)
res = jsbeautifier.beautify_file(file)
preFile = "preformat_" + file
op = open(preFile, "w+")
op.write(res)
op.close()
oFileContent = getContent(preFile)
formatFile = Formatter()
formatFile.formatter(file, oFileContent)
fFile = "formatted_" + file
fFileContent = getContent(fFile)
isAbnormal = False
isHighRisk = False
mergeFile = Merge()
isAbnormal, isHighRisk = mergeFile.mergeReduce(file, fFileContent, argvs)
print(isAbnormal, isHighRisk)
srcProcessedPath = path + "/" + file
if not isAbnormal and not isHighRisk:
#classify processible contract
classify = Classifier()
mFile = "merged_" + file
mFileContent = getContent(mFile)
isProcessible = classify.classifier(mFileContent)
print(isProcessible)
srcProcessiblePath = path + "/" + mFile
if isProcessible:
dstProcessiblePath = path + "/Processible/" + mFile
shutil.copy(srcProcessiblePath, dstProcessiblePath)
print(mFile, " is processible and has been put in the Processible directory.")
os.remove(srcProcessiblePath)
else:
os.remove(srcProcessiblePath)
desProcessedPath = path + "/ProcessedContracts/" + file
noteStr = "ProcessedContracts"
elif not isAbnormal and isHighRisk:
desProcessedPath = path + "/varRepeatContracts/" + file
noteStr = "varRepeatContracts"
elif isAbnormal and not isHighRisk:
desProcessedPath = path + "/abnormalContracts/" + file
noteStr = "abnormalContracts"
shutil.copy(srcProcessedPath, desProcessedPath)
print(file, " has been moved to the " + noteStr +" directory.")
#remove formatted contract
formattedFile = path + "/" + fFile
os.remove(formattedFile)
os.remove(preFile)
os.remove(srcProcessedPath)
if __name__ == "__main__":
filename = sys.argv[1]
argvs = ''
if len(sys.argv) > 2:
argvs = sys.argv[2]
Process().main(filename, argvs)
|
the-stack_0_9624 | import os
import shutil
import yaml
from six import iteritems
from ..base import PackageJson, BasePackageManager, PackageManagerError
from .lockfile import PnpmLockfile
from .workspace import PnpmWorkspace
from .utils import build_pj_path, build_lockfile_path, build_ws_config_path, build_nm_bundle_path
class PnpmPackageManager(BasePackageManager):
_STORE_NM_PATH = os.path.join(".pnpm", "store")
_VSTORE_NM_PATH = os.path.join(".pnpm", "virtual-store")
_STORE_VER = "v3"
def install(self):
"""
Creates node_modules directory according to the lockfile.
"""
self._prepare_workspace()
self._exec_command([
"install",
"--offline",
"--frozen-lockfile",
"--store-dir", self._nm_path(self._STORE_NM_PATH),
"--virtual-store-dir", self._nm_path(self._VSTORE_NM_PATH),
"--no-verify-store-integrity",
"--package-import-method", "hardlink",
"--ignore-pnpmfile",
"--ignore-scripts",
"--strict-peer-dependencies",
])
self._fix_stores_in_modules_yaml()
def get_peer_paths_from_package_json(self):
"""
Returns paths of direct workspace dependencies (source root related).
:rtype: list of str
"""
pj = PackageJson.load(build_pj_path(self.sources_path))
return map(lambda x: os.path.normpath(os.path.join(self.module_path, x[1])), pj.get_workspace_dep_paths())
def calc_node_modules_inouts(self):
"""
Returns input and output paths for command that creates `node_modules` bundle.
:return: Pair of input and output paths with correct roots ($S or $B).
:rtype: (list of str, list of str)
"""
# Inputs: source package.json and lockfile, built package.jsons, lockfiles and workspace configs of deps, tarballs.
ins = []
# Source lockfiles are used only to get tarballs info.
src_lf_paths = [build_lockfile_path(self.sources_path)]
pj = PackageJson.load(build_pj_path(self.sources_path))
for [dep_src_path, (dep_pj, depth)] in iteritems(pj.get_workspace_map()):
if dep_src_path == self.sources_path:
continue
dep_mod_path = dep_src_path[len(self.sources_root) + 1:]
# pnpm requires all package.jsons.
ins.append(build_pj_path(dep_mod_path))
dep_lf_src_path = build_lockfile_path(dep_src_path)
if not os.path.isfile(dep_lf_src_path):
continue
src_lf_paths.append(dep_lf_src_path)
# Merged workspace configs and lockfiles of direct deps.
if depth == 1:
ins.append(build_ws_config_path(dep_mod_path))
ins.append(build_lockfile_path(dep_mod_path))
for pkg in self.extract_packages_meta_from_lockfiles(src_lf_paths):
ins.append(self._contrib_tarball_path(pkg))
s_root = lambda x: os.path.join("$S", x)
b_root = lambda x: os.path.join("$B", x)
        ins = [b_root(x) for x in ins] + [
s_root(build_pj_path(self.module_path)),
s_root(build_lockfile_path(self.module_path)),
]
# Outputs: patched lockfile, generated workspace config, created node_modules bundle.
outs = [b_root(f(self.module_path)) for f in (build_lockfile_path, build_ws_config_path, build_nm_bundle_path)]
return (ins, outs)
def extract_packages_meta_from_lockfiles(self, lf_paths):
"""
:type lf_paths: iterable of BaseLockfile
:rtype: iterable of LockfilePackageMeta
"""
tarballs = set()
for lf_path in lf_paths:
try:
for pkg in PnpmLockfile.load(lf_path).get_packages_meta():
if pkg.tarball_path not in tarballs:
tarballs.add(pkg.tarball_path)
yield pkg
except Exception as e:
raise PackageManagerError("Unable to process lockfile {}: {}".format(lf_path, e))
def _prepare_workspace(self):
pj = self._build_package_json()
ws = PnpmWorkspace(build_ws_config_path(self.build_path))
ws.set_from_package_json(pj)
dep_paths = ws.get_paths()
self._build_merged_workspace_config(ws, dep_paths)
self._build_merged_lockfile(dep_paths)
def _build_package_json(self):
"""
:rtype: PackageJson
"""
in_pj_path = build_pj_path(self.sources_path)
out_pj_path = build_pj_path(self.build_path)
shutil.copyfile(in_pj_path, out_pj_path)
return PackageJson.load(out_pj_path)
def _build_merged_lockfile(self, dep_paths):
"""
:type dep_paths: list of str
:rtype: PnpmLockfile
"""
in_lf_path = build_lockfile_path(self.sources_path)
out_lf_path = build_lockfile_path(self.build_path)
lf = PnpmLockfile.load(in_lf_path)
# Change to the output path for correct path calcs on merging.
lf.path = out_lf_path
for dep_path in dep_paths:
if dep_path is self.build_path:
continue
lf_path = build_lockfile_path(dep_path)
if os.path.isfile(lf_path):
lf.merge(PnpmLockfile.load(lf_path))
lf.update_tarball_resolutions(lambda p: self._contrib_tarball_url(p))
lf.write()
def _build_merged_workspace_config(self, ws, dep_paths):
"""
:type ws: PnpmWorkspaceConfig
:type dep_paths: list of str
"""
for dep_path in dep_paths:
if dep_path is self.build_path:
continue
ws_config_path = build_ws_config_path(dep_path)
if os.path.isfile(ws_config_path):
ws.merge(PnpmWorkspace.load(ws_config_path))
ws.write()
def _fix_stores_in_modules_yaml(self):
"""
Ensures that store paths are the same as would be after installing deps in the source dir.
This is required to reuse `node_modules` after build.
"""
with open(self._nm_path(".modules.yaml"), "r+") as f:
data = yaml.load(f, Loader=yaml.CSafeLoader)
# NOTE: pnpm requires absolute store path here.
data["storeDir"] = os.path.join(self.sources_path, "node_modules", self._STORE_NM_PATH, self._STORE_VER)
data["virtualStoreDir"] = self._VSTORE_NM_PATH
f.seek(0)
yaml.dump(data, f, Dumper=yaml.CSafeDumper)
f.truncate()
def _get_default_options(self):
return super(PnpmPackageManager, self)._get_default_options() + [
"--stream",
"--reporter", "append-only",
"--no-color",
]
def _get_debug_log_path(self):
return self._nm_path(".pnpm-debug.log")
|
the-stack_0_9625 | import unittest
from models import articles
Articles = articles.Articles
class ArticlesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Articles class
'''
def setUp(self):
'''
Set up method that will run before every test
'''
self.new_articles = Articles('id','author','description','https://www.youtube.com/watch?v=RN75zSpYp7M',"https://i.kinja-img.com/gawker-media/image/upload/s--yDtXY-I4--/c_fill,fl_progressive,g_center,h_900,q_80,w_1600/pj5jc9ntilzdb4dfnivl.png",'kenya','content')
def test_instance(self):
self.assertTrue(isinstance(self.new_articles,Articles))
if __name__ == '__main__':
unittest.main() |
the-stack_0_9626 | # -*- coding: utf-8 -*-
from seleniumbase import BaseCase
class ChinesePdfTests(BaseCase):
def test_chinese_pdf(self):
pdf = (
"https://github.com/seleniumbase/SeleniumBase/"
"files/3895614/unittest.pdf"
)
# Get and print PDF text
pdf_text = self.get_pdf_text(pdf, page=2)
self._print("\n" + pdf_text)
# Assert PDF contains the expected text on Page 2
        self.assert_pdf_text(pdf, "个测试类", page=2)
        # Assert PDF contains the expected text on any of the pages
        self.assert_pdf_text(pdf, "运行单元测试")
        self.assert_pdf_text(pdf, "等待测试结束后显示所有结果")
        self.assert_pdf_text(pdf, "测试的执行跟方法的顺序没有关系")
|
the-stack_0_9627 | import unittest
from yoti_python_sdk.doc_scan.session.retrieve.frame_response import FrameResponse
from yoti_python_sdk.doc_scan.session.retrieve.media_response import MediaResponse
class FrameResponseTest(unittest.TestCase):
def test_should_parse_correctly(self):
data = {"media": {}}
result = FrameResponse(data)
assert isinstance(result.media, MediaResponse)
def test_should_parse_when_none(self):
result = FrameResponse(None)
assert isinstance(result, FrameResponse)
assert result.media is None
if __name__ == "__main__":
unittest.main()
|
the-stack_0_9630 | from synapseclient.activity import Activity
# SYNPY-744
def test_private_getStringList():
act = Activity()
url_string = \
'https://github.com/Sage-Bionetworks/ampAdScripts/blob/master/Broad-Rush/migrateROSMAPGenotypesFeb2015.R'
act.used([{'wasExecuted': True,
'concreteType': 'org.sagebionetworks.repo.model.provenance.UsedURL',
'url': url_string}
])
assert [url_string] == act._getStringList()
|
the-stack_0_9631 | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# darkmoss scheme by Gabriel Avanzi (https://github.com/avanzzzi)
base00 = "#171e1f"
base01 = "#252c2d"
base02 = "#373c3d"
base03 = "#555e5f"
base04 = "#818f80"
base05 = "#c7c7a5"
base06 = "#e3e3c8"
base07 = "#e1eaef"
base08 = "#ff4658"
base09 = "#e6db74"
base0A = "#fdb11f"
base0B = "#499180"
base0C = "#66d9ef"
base0D = "#498091"
base0E = "#9bc0c8"
base0F = "#d27b53"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base01
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base05
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base02
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base02
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base02
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base0B
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu's selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base02
# Foreground color of the context menu's selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base05
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base02
# Foreground color for the selected item in filename prompts.
c.colors.prompts.selected.fg = base05
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base01
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base01
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
|
the-stack_0_9632 | from datetime import datetime, date
from six import iteritems, PY2, PY3, u
import json
import pytz
from enum import Enum
if PY3:
from datetime import timezone
# compat
from six.moves import map
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime) or isinstance(obj, date) else None
class OutputModes(Enum):
"""List of valid settings for the output_mode parameter of the OpenTok.start_archive()
method."""
composed = u('composed')
"""All streams in the archive are recorded to a single (composed) file."""
individual = u('individual')
"""Each stream in the archive is recorded to an individual file."""
class Archive(object):
"""Represents an archive of an OpenTok session.
:ivar created_at:
The time at which the archive was created, in milliseconds since the UNIX epoch.
:ivar duration:
The duration of the archive, in milliseconds.
:ivar has_audio:
Boolean value set to true when the archive contains an audio track,
and set to false otherwise.
:ivar has_video:
Boolean value set to true when the archive contains a video track,
and set to false otherwise.
:ivar id:
The archive ID.
:ivar name:
The name of the archive. If no name was provided when the archive was created, this is set
to null.
:ivar output_mode:
Whether all streams in the archive are recorded to a single file
(OutputModes.composed) or to individual files (OutputModes.individual).
:ivar partnerId:
The API key associated with the archive.
:ivar reason:
For archives with the status "stopped", this can be set to "90 mins exceeded", "failure",
"session ended", or "user initiated". For archives with the status "failed", this can be set
to "system failure".
:ivar sessionId:
The session ID of the OpenTok session associated with this archive.
:ivar size:
The size of the MP4 file. For archives that have not been generated, this value is set to 0.
:ivar status:
The status of the archive, which can be one of the following:
* "available" -- The archive is available for download from the OpenTok cloud.
* "expired" -- The archive is no longer available for download from the OpenTok cloud.
* "failed" -- The archive recording failed.
* "paused" -- The archive is in progress and no clients are publishing streams to the
session. When an archive is in progress and any client publishes a stream, the status is
"started". When an archive is paused, nothing is recorded. When a client starts publishing
a stream, the recording starts (or resumes). If all clients disconnect from a session that
is being archived, the status changes to "paused", and after 60 seconds the archive
recording stops (and the status changes to "stopped").
* "started" -- The archive started and is in the process of being recorded.
* "stopped" -- The archive stopped recording.
* "uploaded" -- The archive is available for download from the the upload target
Amazon S3 bucket or Windows Azure container that you set at the
`OpenTok dashboard <https://dashboard.tokbox.com>`_.
:ivar url:
The download URL of the available MP4 file. This is only set for an archive with the status set to
"available"; for other archives, (including archives with the status "uploaded") this property is
set to null. The download URL is obfuscated, and the file is only available from the URL for
10 minutes. To generate a new URL, call the Archive.listArchives() or OpenTok.getArchive() method.
"""
def __init__(self, sdk, values):
self.sdk = sdk
self.id = values.get('id')
self.name = values.get('name')
self.status = values.get('status')
self.session_id = values.get('sessionId')
self.partner_id = values.get('partnerId')
if PY2:
self.created_at = datetime.fromtimestamp(values.get('createdAt') / 1000, pytz.UTC)
if PY3:
self.created_at = datetime.fromtimestamp(values.get('createdAt') // 1000, timezone.utc)
self.size = values.get('size')
self.duration = values.get('duration')
self.has_audio = values.get('hasAudio')
self.has_video = values.get('hasVideo')
self.output_mode = OutputModes[values.get('outputMode', 'composed')]
self.url = values.get('url')
def stop(self):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
"""
temp_archive = self.sdk.stop_archive(self.id)
for k,v in iteritems(temp_archive.attrs()):
setattr(self, k, v)
def delete(self):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
"""
self.sdk.delete_archive(self.id)
# TODO: invalidate this object
def attrs(self):
"""
Returns a dictionary of the archive's attributes.
"""
        return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk")
def json(self):
"""
Returns a JSON representation of the archive.
"""
return json.dumps(self.attrs(), default=dthandler, indent=4)
class ArchiveList(object):
def __init__(self, sdk, values):
self.count = values.get('count')
self.items = list(map(lambda x: Archive(sdk, x), values.get('items', [])))
def __iter__(self):
for x in self.items:
yield x
def attrs(self):
return {
'count': self.count,
            'items': list(map(Archive.attrs, self.items))
}
def json(self):
return json.dumps(self.attrs(), default=dthandler, indent=4)
def __getitem__(self, key):
return self.items.get(key)
def __setitem__(self, key, item):
raise ArchiveError(u('Cannot set item {0} for key {1} in Archive object').format(item, key))
def __len__(self):
return len(self.items)
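

# Usage sketch (assumption: `sdk` is the OpenTok client object this module receives -- the
# same object passed to Archive(...) above -- and it exposes start_archive() as in the
# public Python SDK; the session id is a placeholder).
def _example_archive_usage(sdk, session_id):
    archive = sdk.start_archive(session_id, name="demo", output_mode=OutputModes.composed)
    archive.stop()         # wraps sdk.stop_archive(archive.id) and refreshes the fields
    print(archive.json())  # status, duration, url, ...
    archive.delete()       # only valid once the archive is "available" or "uploaded"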
|
the-stack_0_9633 | import hashlib
import os
def upload_path(instance, filename, **kwargs):
hasher = hashlib.md5()
for chunk in instance.image.chunks():
hasher.update(chunk)
hash = hasher.hexdigest()
base, ext = os.path.splitext(filename)
return '%(first)s/%(second)s/%(hash)s/%(base)s%(ext)s' % {
'first': hash[0],
'second': hash[1],
'hash': hash,
'base': base,
'ext': ext,
}
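

# Minimal sketch of how the generated path comes out.  _FakeImage/_FakeInstance only mimic
# the `instance.image.chunks()` interface used above; in Django the instance would be a
# model with an ImageField.
class _FakeImage(object):
    def __init__(self, data):
        self._data = data

    def chunks(self):
        yield self._data


class _FakeInstance(object):
    def __init__(self, data):
        self.image = _FakeImage(data)


if __name__ == '__main__':
    # Prints e.g. "<h0>/<h1>/<md5>/photo.jpg" -- first two hash characters, full hash, filename.
    print(upload_path(_FakeInstance(b'example image bytes'), 'photo.jpg'))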
|
the-stack_0_9634 | import os
import sys
import django
import logging
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
profile = os.environ.get('HELLOFAMILYCLUB', 'develop')
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'hellofamilyclub.settings.{}'.format(profile))
django.setup()
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from pictures.service.weibo import fetch_weibo_pictures
from pictures.service.recognize import recognize_all_pictures
from pictures.service.config import db_client
from news.service.helloproject_news import run_collect_hello_project_news
logging.basicConfig(filename='/Users/yuhao/log/job.log', filemode='a')
logging.getLogger('apscheduler').setLevel(logging.DEBUG)
jobstores = {
'mongo': MongoDBJobStore(collection='job', database='hellofamily',
client=db_client),
'default': MemoryJobStore()
}
executors = {
'default': ThreadPoolExecutor(20),
'processpool': ProcessPoolExecutor(5),
}
job_defaults = {
'coalesce': False,
'max_instances': 10,
}
scheduler = BlockingScheduler(jobstores=jobstores, executors=executors,
job_defaults=job_defaults)
scheduler.add_job(fetch_weibo_pictures, 'interval', hours=1,
replace_existing=True, id='fetch_weibo_pictures',
jobstore='mongo', max_instances=1)
scheduler.add_job(recognize_all_pictures, 'interval', hours=1,
replace_existing=True, id='recognize_all_pictures',
jobstore='mongo', max_instances=1)
scheduler.add_job(run_collect_hello_project_news, 'interval', hours=2,
replace_existing=True, id='collect_hello_project_news',
jobstore='mongo', max_instances=1)
scheduler.start()
|
the-stack_0_9635 | import os
from appi2c.ext.database import db
def init_app(app):
app.config["SECRET_KEY"] = "appi2c_from_raspberry"
basedir = os.path.abspath(os.path.dirname('ext/database/'))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'database.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['FLASK_ADMIN_SWATCH'] = 'cosmo'
app.config["MAX_IMAGE_FILESIZE"] = 10 * 1024 * 1024
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPEG", "JPG", "PNG", "GIF"]
if app.debug:
app.config["DEBUG_TB_TEMPLATE_EDITOR_ENABLED"] = True
app.config["DEBUG_TB_PROFILER_ENABLED"] = True
|
the-stack_0_9637 | import pathlib
import os
import unittest
from explainaboard import FileType, Source, TaskType, get_loader, get_processor
artifacts_path = os.path.dirname(pathlib.Path(__file__)) + "/artifacts/"
class TestTextPairClassification(unittest.TestCase):
def test_snli(self):
metadata = {"task_name": TaskType.text_classification.value,
"metric_names": ["Accuracy"]}
path_data = artifacts_path+ "test-snli.tsv"
loader = get_loader(TaskType.text_pair_classification, Source.local_filesystem, FileType.tsv, path_data)
data = loader.load()
processor = get_processor(TaskType.text_pair_classification, metadata, data)
self.assertEqual(len(processor._features), 8)
analysis = processor.process()
#analysis.to_memory()
# analysis.write_to_directory("./")
self.assertListEqual(analysis.metric_names, metadata["metric_names"])
# self.assertIsNotNone(analysis.results.fine_grained)
# self.assertGreater(len(analysis.results.overall), 0)
|
the-stack_0_9638 | import numpy as np
import os
import sys
sys.path.append('mytorch')
from loss import *
from activation import *
from batchnorm import *
from linear import *
class MLP(object):
"""
A simple multilayer perceptron
"""
def __init__(self, input_size, output_size, hiddens, activations, weight_init_fn,
bias_init_fn, criterion, lr, momentum=0.0, num_bn_layers=0):
self.train_mode = True
self.num_bn_layers = num_bn_layers
self.bn = num_bn_layers > 0
self.nlayers = len(hiddens) + 1
self.input_size = input_size
self.output_size = output_size
self.activations = activations
self.criterion = criterion
self.lr = lr
self.momentum = momentum
if (len(hiddens) <= 0):
self.linear_layers = [Linear(input_size, output_size, weight_init_fn, bias_init_fn)]
else:
self.linear_layers = []
self.linear_layers.append(Linear(input_size, hiddens[0], weight_init_fn, bias_init_fn))
for i in range(1, len(hiddens)):
self.linear_layers.append(Linear(hiddens[i-1], hiddens[i], weight_init_fn, bias_init_fn))
self.linear_layers.append(Linear(hiddens[-1], output_size, weight_init_fn, bias_init_fn))
if self.bn:
self.bn_layers = [BatchNorm(hiddens[i]) for i in range(num_bn_layers)]
self.output = None
def forward(self, x):
"""
Argument:
x (np.array): (batch size, input_size)
Return:
out (np.array): (batch size, output_size)
"""
for i in range(len(self.linear_layers)):
x = self.linear_layers[i](x)
if i < self.num_bn_layers:
x = self.bn_layers[i](x, (not self.train_mode))
x = self.activations[i](x)
# x = self.activations[-1](x)
self.output = x
return x
def zero_grads(self):
for i in range(len(self.linear_layers)):
self.linear_layers[i].dW.fill(0.0)
def step(self):
for i in range(len(self.linear_layers)):
self.linear_layers[i].momentum_W = self.momentum * self.linear_layers[i].momentum_W - self.lr * self.linear_layers[i].dW
# print(self.linear_layers[i].dW)
self.linear_layers[i].W = self.linear_layers[i].W + self.linear_layers[i].momentum_W
self.linear_layers[i].momentum_b = self.momentum * self.linear_layers[i].momentum_b - self.lr * self.linear_layers[i].db
self.linear_layers[i].b = self.linear_layers[i].b + self.linear_layers[i].momentum_b
if self.bn:
for i in range(len(self.bn_layers)):
self.bn_layers[i].gamma = self.bn_layers[i].gamma - self.lr * self.bn_layers[i].dgamma
# self.bn_layers[i].gamma = self.bn_layers[i].gamma/np.sqrt(self.bn_layers[i].running_var + self.bn_layers[i].eps)
self.bn_layers[i].beta = self.bn_layers[i].beta - self.lr * self.bn_layers[i].dbeta
# self.bn_layers[i].beta = self.bn_layers[i].beta - self.bn_layers[i].gamma * self.bn_layers[i].running_mean
def backward(self, labels):
self.criterion.forward(self.output, labels)
grd = self.criterion.derivative()
# print(self.criterion.logsum)
for i in range(self.nlayers - 1, -1,-1):
grd = self.activations[i].derivative() * grd
# print(grd)
if self.bn and i < self.num_bn_layers:
grd = self.bn_layers[i].backward(grd)
grd = self.linear_layers[i].backward(grd)
return grd
def error(self, labels):
return (np.argmax(self.output, axis = 1) != np.argmax(labels, axis = 1)).sum()
def total_loss(self, labels):
return self.criterion(self.output, labels).sum()
def __call__(self, x):
return self.forward(x)
def train(self):
self.train_mode = True
def eval(self):
self.train_mode = False
def get_training_stats(mlp, dset, nepochs, batch_size):
train, val, _ = dset
trainx, trainy = train
valx, valy = val
idxs = np.arange(len(trainx))
training_losses = np.zeros(nepochs)
training_errors = np.zeros(nepochs)
validation_losses = np.zeros(nepochs)
validation_errors = np.zeros(nepochs)
for e in range(nepochs):
print(e)
t_row= np.arange(trainx.shape[0])
np.random.shuffle(t_row)
trainx = trainx[t_row,:]
trainy = trainy[t_row,:]
# print(t_row == idxs)
# Per epoch setup ...
batchmean = []
batchtotal = []
for b in range(0, len(trainx), batch_size):
mlp.zero_grads()
mlp.forward(trainx[b:b+batch_size, :])
mlp.backward(trainy[b:b+batch_size, :])
batchtotal.append(mlp.total_loss(trainy[b:b+batch_size, :])/batch_size)
# print(type(mlp.total_loss(trainy[b:b+batch_size, :])))
batchmean.append(mlp.error(trainy[b:b+batch_size, :])/batch_size)
mlp.step()
valloss = []
valerror = []
for b in range(0, len(valx), batch_size):
            mlp.forward(valx[b:b+batch_size, :])
            valloss.append(mlp.total_loss(valy[b:b+batch_size, :])/batch_size)
            valerror.append(mlp.error(valy[b:b+batch_size, :])/batch_size)
training_errors[e] = np.array(batchmean).mean()
training_losses[e] = np.array(batchtotal).mean()
validation_errors[e] = np.array(valerror).mean()
validation_losses[e] = np.array(valloss).mean()
print(np.min(training_losses))
print(np.min(training_errors))
return (training_losses, training_errors, validation_losses, validation_errors)
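

# Sketch: wiring up a small network for get_training_stats.  The initializer signatures
# (weight_init_fn(in_dim, out_dim), bias_init_fn(out_dim)) are assumptions inferred from how
# Linear is constructed above; Sigmoid/Identity/SoftmaxCrossEntropy are expected to come
# from the starred mytorch imports.
def _build_example_mlp():
    weight_init = lambda d0, d1: np.random.randn(d0, d1) * 0.1
    bias_init = lambda d: np.zeros(d)
    return MLP(input_size=784, output_size=10, hiddens=[64],
               activations=[Sigmoid(), Identity()],
               weight_init_fn=weight_init, bias_init_fn=bias_init,
               criterion=SoftmaxCrossEntropy(), lr=0.01, momentum=0.9,
               num_bn_layers=1)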
|
the-stack_0_9639 | from ._city_transformer_postscripts import CityTransformerPostscripts
from ._city_transformer_inverse_postscripts import CityTransformerInversePostscripts
def get_postscripts(name):
POST_SCRIPTS = {
'CityTransformer': CityTransformerPostscripts,
'CityTransformerInverse': CityTransformerInversePostscripts,
}
for n, p in POST_SCRIPTS.items():
if n.lower() == name.lower():
return p
raise ValueError(f'trainer {name} is not defined')
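

# Example: the lookup is case-insensitive; unknown names raise ValueError.
if __name__ == '__main__':
    assert get_postscripts('citytransformer') is CityTransformerPostscripts
    assert get_postscripts('CityTransformerInverse') is CityTransformerInversePostscripts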
|
the-stack_0_9641 | import math
from typing import Optional
import torch
from falkon.options import BaseOptions
import falkon
from falkon.mmv_ops.utils import _setup_opt, _get_cpu_ram
from falkon.sparse.sparse_tensor import SparseTensor
from falkon.utils.helpers import select_dim_over_d, sizeof_dtype, select_dim_over_m
from falkon.utils.tensor_helpers import create_same_stride
def fmmv_cpu_sparse(X1: SparseTensor,
X2: SparseTensor,
v: torch.Tensor,
kernel: 'falkon.kernels.Kernel',
out: Optional[torch.Tensor],
opt: BaseOptions):
opt = _setup_opt(opt, is_cpu=True)
dtype = X1.dtype
ntot, dtot = X1.size()
mtot, T = v.size()
# Create output matrix
if out is None:
out = torch.empty(ntot, T, dtype=dtype)
out.fill_(0.0)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# Narrowing X1, X2: n + m
# Prepare - not computable, depends on kernel
# ker_chunk : n*m
# finalize : 0 (if can be implemented in place, kernel-dependent)
n, m = select_dim_over_m(
maxM=mtot, maxN=ntot,
coef_nm=1, coef_n=1, coef_m=1, tot=avail_mem)
ker_chunk = create_same_stride((n, m), out, dtype, device='cpu')
for i in range(0, ntot, n):
ic = min(n, ntot - i)
cur_out = out[i:i + ic, :]
X1_chunk = X1.narrow_rows(i, ic)
for j in range(0, mtot, m):
jc = min(m, mtot - j)
X2_chunk = X2.narrow_rows(j, jc)
cur_ker_chunk = ker_chunk[:ic, :jc]
cur_ker_chunk.fill_(0.0)
ddd = kernel._prepare_sparse(X1_chunk, X2_chunk)
kernel._apply_sparse(X1_chunk, X2_chunk.transpose_csc(), cur_ker_chunk)
kernel._finalize(cur_ker_chunk, ddd)
# Multiply by the vector v
cur_out.addmm_(cur_ker_chunk, v.narrow(0, j, jc))
return out
def fmmv_cpu(X1, X2, v, kernel, out, opt):
"""Blockwise kernel-vector product
This function computes ``kernel(X1, X2) @ v`` in a blockwise fashion, to avoid having the
whole N*M kernel matrix in memory at once.
Note that while the principle is that of matrix-vector product, `v` can have more than
one column.
Parameters
-----------
X1
[N, D] array
X2
[M, D] array
v
[M, T] array
kernel
Class representing the desired kernel function
out : torch.Tensor or None
[N, T] array for storing the kernel-vector product output.
If None, will be allocated within the function.
opt
Basic options dictionary, used for determining available memory.
"""
opt = _setup_opt(opt, is_cpu=True)
ntot, dtot = X1.size(0), X1.size(1)
M, T = v.size()
dtype = v.dtype
# Create output matrix
if out is None:
out = torch.empty(ntot, T, dtype=dtype)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# Only necessary memory allocation is that for the temporary kernel
# `temp_out` of size n*M
n, d = select_dim_over_d(
maxD=dtot, maxN=ntot,
coef_nd=0, coef_n=M, coef_d=0, rest=0, tot=avail_mem)
# Run batched matrix multiplication
for i in range(0, ntot, n):
ic = min(n, ntot - i)
ddd = kernel._prepare(X1.narrow(0, i, ic), X2) # , v=v)
temp_out = torch.zeros(ic, M, dtype=dtype)
for k in range(0, dtot, d):
kc = min(d, dtot - k)
X1d = X1[i: i + ic, k: k + kc]
X2d = X2[:, k: k + kc]
kernel._apply(X1d, X2d.T, temp_out)
# temp_out = fnc(X1*X2', X1, X2)
kernel._finalize(temp_out, ddd)
torch.mm(temp_out, v, out=out[i: i + ic, :])
return out
def fdmmv_cpu(X1, X2, v, w, kernel, out, opt):
"""Calculate a double kernel-vector product.
This function computes the following quantity: ``kernel(X1, X2).T @ (kernel(X1, X2) @ v + w)``
Where one of `v` or `w` can be empty.
All arrays passed to this function must be 2-dimensional, although
the second dimension can be unitary.
The expression is not computed directly. We separate the computation
into smaller blocks so as to reduce the total memory consumption (the
large N*M kernel matrix is never wholly stored in RAM.)
Parameters
-----------
X1
[N, D] array
X2
[M, D] array
v : torch.Tensor or None
[M, T] array. But note that at least one of v or w must be specified.
w : torch.Tensor or None
[N, T] array. But note that at least one of v or w must be specified.
kernel
Class representing the desired kernel function
out : torch.Tensor or None
[M, T] array for storing the kernel-vector product output.
If None, will be allocated within the function.
opt
Basic options dictionary, used for determining available memory.
"""
opt = _setup_opt(opt, is_cpu=True)
# Parameter validation
if v is None and w is None:
raise ValueError("One of v and w must be specified to run fMMV.")
T = v.shape[1] if v is not None else w.shape[1]
ntot, dtot = X1.size()
M = X2.size(0)
dtype = X1.dtype
# Create output matrix
if out is None:
out = torch.empty(M, T, dtype=dtype)
out.fill_(0)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# The only necessary temporary matrices are: `temp_out` of size n*M and
# temp_w_block of size n*T
n, d = select_dim_over_d(
maxD=dtot, maxN=ntot,
coef_nd=0, coef_n=M + T, coef_d=0, rest=0, tot=avail_mem)
# Run Batched Matrix Computation
for i in range(0, ntot, n):
ic = min(n, ntot - i)
ddd = kernel._prepare(X1[i: i + ic, :], X2)
temp_out = torch.zeros(ic, M, dtype=dtype)
for k in range(0, dtot, d):
kc = min(d, dtot - k)
X1d = X1[i: i + ic, k: k + kc]
X2d = X2[:, k: k + kc]
kernel._apply(X1d, X2d.T, temp_out)
kernel._finalize(temp_out, ddd) # fnc(X1*X2', X1, X2) [n x M]
w_blk = torch.zeros(ic, T, dtype=dtype) # n x T
if w is not None:
w_blk.copy_(w[i: i + ic, :])
if v is not None:
# w_blk + c_out * v => (n x T) + (n x M)*(M x T)
w_blk.addmm_(temp_out, v)
out.add_(torch.mm(temp_out.T, w_blk))
return out
def fdmmv_cpu_sparse(X1: SparseTensor,
X2: SparseTensor,
v: Optional[torch.Tensor],
w: Optional[torch.Tensor],
kernel,
out: Optional[torch.Tensor] = None,
opt: Optional[BaseOptions] = None):
opt = _setup_opt(opt, is_cpu=True)
# Parameter validation
if v is None and w is None:
raise ValueError("One of v and w must be specified to run fMMV.")
T = v.size(1) if v is not None else w.size(1)
ntot, dtot = X1.size()
M = X2.size(0)
dtype = X1.dtype
# Create output matrix
if out is None:
out = torch.empty(M, T, dtype=dtype)
out.fill_(0)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# Narrow X1 : n
# ker_chunk : n*M
# w_blk : n*T
n = avail_mem / (M * T + 1)
n = int(math.floor(n))
if n < 1:
raise MemoryError(("Available memory %.2fGB is insufficient "
"for blockwise fdMMv.") % (avail_mem * sizeof_dtype(dtype) / 2**30))
# Allocate fixed arrays
ker_chunk = create_same_stride((n, M), out, dtype, device='cpu')
w_blk = create_same_stride((n, T), out, dtype, device='cpu')
# Run blocked fdmmv
for i in range(0, ntot, n):
ic = min(n, ntot - i)
X1_chunk = X1.narrow_rows(i, ic)
cur_ker_chunk = ker_chunk[:ic]
cur_ker_chunk.fill_(0.0)
ddd = kernel._prepare_sparse(X1_chunk, X2)
kernel._apply_sparse(X1_chunk, X2.transpose_csc(), cur_ker_chunk)
kernel._finalize(cur_ker_chunk, ddd)
# Multiply by the vector v
cur_w_blk = w_blk[:ic] # n x T
cur_w_blk.fill_(0.0)
if w is not None:
cur_w_blk.copy_(w[i: i + ic, :])
if v is not None:
# w_blk + c_out * v => (n x T) + (n x M)*(M x T)
cur_w_blk.addmm_(cur_ker_chunk, v)
out.addmm_(cur_ker_chunk.T, cur_w_blk)
del ker_chunk, w_blk
return out
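

# Usage sketch (assumptions: falkon is installed and falkon.kernels.GaussianKernel accepts
# a `sigma` argument as in recent releases; shapes are illustrative).
def _example_fmmv():
    kernel = falkon.kernels.GaussianKernel(sigma=1.0)
    X1 = torch.randn(1000, 10)   # N x D
    X2 = torch.randn(100, 10)    # M x D
    v = torch.randn(100, 1)      # M x T
    # Blockwise kernel-vector product: equivalent to kernel(X1, X2) @ v, but never
    # materializes the full N x M kernel matrix in memory.
    return fmmv_cpu(X1, X2, v, kernel, out=None, opt=BaseOptions())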
|
the-stack_0_9642 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
# TODO(jbudorick): Split these constants into coherent modules.
# pylint: disable=W0212
import collections
import logging
import os
import subprocess
import devil.android.sdk.keyevent
from devil.android.sdk import version_codes
from devil.constants import exit_codes
keyevent = devil.android.sdk.keyevent
DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir)))
PackageInfo = collections.namedtuple('PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket',
'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
'legacy_browser': PackageInfo(
'com.google.android.browser',
'com.android.browser.BrowserActivity',
None,
None,
None),
'chromecast_shell': PackageInfo(
'com.google.android.apps.mediashell',
'com.google.android.apps.mediashell.MediaShellActivity',
'/data/local/tmp/castshell-command-line',
None,
None),
'content_shell': PackageInfo(
'org.chromium.content_shell_apk',
'org.chromium.content_shell_apk.ContentShellActivity',
'/data/local/tmp/content-shell-command-line',
None,
'org.chromium.content_shell_apk.tests'),
'android_webview_shell': PackageInfo(
'org.chromium.android_webview.shell',
'org.chromium.android_webview.shell.AwShellActivity',
'/data/local/tmp/android-webview-command-line',
None,
'org.chromium.android_webview.test'),
'gtest': PackageInfo(
'org.chromium.native_test',
'org.chromium.native_test.NativeUnitTestActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'components_browsertests': PackageInfo(
'org.chromium.components_browsertests_apk',
('org.chromium.components_browsertests_apk' +
'.ComponentsBrowserTestsActivity'),
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'content_browsertests': PackageInfo(
'org.chromium.content_browsertests_apk',
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'chromedriver_webview_shell': PackageInfo(
'org.chromium.chromedriver_webview_shell',
'org.chromium.chromedriver_webview_shell.Main',
None,
None,
None),
}
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
TEST_POLICY_SERVER_PORT = 9051
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = (
'/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
ANDROID_SDK_VERSION = version_codes.MARSHMALLOW
ANDROID_SDK_BUILD_TOOLS_VERSION = '23.0.1'
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party', 'android_tools', 'sdk')
ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party', 'android_tools', 'ndk')
PROGUARD_SCRIPT_PATH = os.path.join(
ANDROID_SDK_ROOT, 'tools', 'proguard', 'bin', 'proguard.sh')
PROGUARD_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'proguard')
BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
os.environ.get('CHROMIUM_OUT_DIR', 'out'),
'bad_devices.json')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
# TODO(jbudorick): Remove once unused.
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
# TODO(jbudorick): Rework this into testing/buildbot/
PYTHON_UNIT_TEST_SUITES = {
'pylib_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
'test_modules': [
'devil.android.device_utils_test',
'devil.android.md5sum_test',
'devil.utils.cmd_helper_test',
'pylib.results.json_results_test',
'pylib.utils.proguard_test',
]
},
'gyp_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
'test_modules': [
'java_cpp_enum_tests',
'java_google_api_keys_tests',
]
},
}
LOCAL_MACHINE_TESTS = ['junit', 'python']
VALID_ENVIRONMENTS = ['local', 'remote_device']
VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
'perf', 'python', 'uirobot']
VALID_DEVICE_TYPES = ['Android', 'iOS']
def GetBuildType():
try:
return os.environ['BUILDTYPE']
except KeyError:
raise EnvironmentError(
'The BUILDTYPE environment variable has not been set')
def SetBuildType(build_type):
os.environ['BUILDTYPE'] = build_type
def SetBuildDirectory(build_directory):
os.environ['CHROMIUM_OUT_DIR'] = build_directory
def SetOutputDirectory(output_directory):
os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
def GetOutDirectory(build_type=None):
"""Returns the out directory where the output binaries are built.
Args:
build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
globally set build type environment variable BUILDTYPE.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
GetBuildType() if build_type is None else build_type))
# TODO(jbudorick): Convert existing callers to AdbWrapper.GetAdbPath() and
# remove this.
def GetAdbPath():
from devil.android.sdk import adb_wrapper
return adb_wrapper.AdbWrapper.GetAdbPath()
# Exit codes
ERROR_EXIT_CODE = exit_codes.ERROR
INFRA_EXIT_CODE = exit_codes.INFRA
WARNING_EXIT_CODE = exit_codes.WARNING
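

# Example sketch: how a test runner typically consumes these constants (paths shown are
# illustrative and depend on the local checkout).
def _example_usage():
    SetBuildType('Release')
    out_dir = GetOutDirectory()          # e.g. <checkout>/out/Release
    chrome = PACKAGE_INFO['chromium']
    return out_dir, chrome.package, chrome.activity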
|
the-stack_0_9644 | # Note: Before running this from your laptop, you must run "ray attach cluster.yaml -p 8000" to setup a port-forward from the laptop's port 8000 to the cluster's internal port 8000
# The other option is to use "ray submit" to run this on the cluster as-is without a port-forward
import requests
input_text_list = ["Ray Serve is great!", "Serving frameworks without DAG support are not great."]
for input_text in input_text_list:
prediction = requests.get("http://127.0.0.1:8000/invocations", data=input_text).text
print("Average prediction for '{}' is {}".format(input_text, prediction))
|
the-stack_0_9647 | import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME
PROJECT_NAME = 'project_settings_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
gl = get_gitlab()
def fin():
gl.delete_project(GROUP_AND_PROJECT_NAME)
request.addfinalizer(fin)
return gl # provide fixture value
config_builds_for_private_projects = """
gitlab:
api_version: 4
project_settings:
project_settings:
builds_access_level: private
visibility: private
"""
class TestProjectSettings:
def test__builds_for_private_projects(self, gitlab):
gf = GitLabForm(config_string=config_builds_for_private_projects,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
settings = gitlab.get_project_settings(GROUP_AND_PROJECT_NAME)
assert settings['visibility'] == 'private'
# there is no such field in the "Get single project" API :/
#assert settings['builds_access_level'] is 'private'
|
the-stack_0_9648 | # -*- coding: utf-8 -*-
import json
import requests
from openerp import http
from openerp.http import request
def s2human(time):
"""Convert a time in second into an human readable string"""
for delay, desc in [(86400,'d'),(3600,'h'),(60,'m')]:
if time >= delay:
return str(int(time / delay)) + desc
return str(int(time)) + "s"
class RunbotButtons(http.Controller):
def build_info(self, build):
real_build = build.duplicate_id if build.state == 'duplicate' else build
return {
'id': build.id,
'name': build.name,
'state': real_build.state,
'result': real_build.result,
'subject': build.subject,
'author': build.author,
'committer': build.committer,
'dest': build.dest,
'real_dest': real_build.dest,
'job_age': s2human(real_build.job_age),
'job_time': s2human(real_build.job_time),
'job': real_build.job,
'domain': real_build.domain,
'host': real_build.host,
'port': real_build.port,
'server_match': real_build.server_match,
}
def build_html(self, build):
res = []
try:
url = 'http://%s/instance_introspection.json' % build.domain
response = requests.get(url,
timeout=5.00)
if response.status_code == requests.codes.ok:
res = response.json()
except requests.exceptions.Timeout:
res = [{'info': {'error': 'Timeout',
'message': '''Instance is not running
https://github.com/Vauxoo/server-tools/tree/8.0/instance_introspection
read the help to know how configure it properlly'''}}]
except requests.exceptions.TooManyRedirects:
res = [{'info': {'error': 'TooMany redirect',
'message': '''Install properly the instance_introspection:
https://github.com/Vauxoo/server-tools/tree/8.0/instance_introspection
read the help to know how'''}}]
except requests.exceptions.RequestException as e:
res = [{'info': {'error': 'Unknown Error',
'message': '''%s''' % e.message}}]
# catastrophic error. bail.
            _logger.exception(e)
return res
@http.route(['/vauxooci/build_button/<build_id>'], type='http', auth="public", website=True)
def build(self, build_id=None, search=None, **post):
registry, cr, uid, context = request.registry, request.cr, request.uid, request.context
Build = registry['runbot.build']
build = Build.browse(cr, uid, [int(build_id)])[0]
if not build.exists():
return request.not_found()
context = {
'introspection': build.introspection,
'introspection_html': self.build_html(build),
'repo': build.repo_id,
'bu': self.build_info(build),
'br': {'branch': build.branch_id},
}
return request.render("vauxooci.build_button", context)
# @http.route('/runbot_frontend/runbot_frontend/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('runbot_frontend.listing', {
# 'root': '/runbot_frontend/runbot_frontend',
# 'objects': http.request.env['runbot_frontend.runbot_frontend'].search([]),
# })
# @http.route('/runbot_frontend/runbot_frontend/objects/<model("runbot_frontend.runbot_frontend"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('runbot_frontend.object', {
# 'object': obj
# })
|
the-stack_0_9653 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Bring Your Own Datatypes to TVM
===============================
**Authors**: `Gus Smith <https://github.com/gussmith23>`_, `Andrew Liu <https://github.com/hypercubestart>`_
In this tutorial, we will show you how to utilize the Bring Your Own Datatypes framework to use your own custom datatypes in TVM.
Note that the Bring Your Own Datatypes framework currently only handles **software emulated versions of datatypes**.
The framework does not support compiling for custom accelerator datatypes out-of-the-box.
Datatype Libraries
------------------
The Bring Your Own Datatypes allows users to register their own datatype implementations alongside TVM's native datatypes (such as ``float``).
In the wild, these datatype implementations often appear as libraries.
For example:
- `libposit <https://github.com/cjdelisle/libposit>`_, a posit library
- `Stillwater Universal <https://github.com/stillwater-sc/universal>`_, a library with posits, fixed-point numbers, and other types
- `SoftFloat <https://github.com/ucb-bar/berkeley-softfloat-3>`_, Berkeley's software implementation of IEEE 754 floating-point
The Bring Your Own Datatypes enables users to plug these datatype implementations into TVM!
In this section, we will use an example library we have already implemented, located at ``3rdparty/byodt/myfloat.cc``.
This datatype, which we dubbed "myfloat", is really just an IEEE 754 float under the hood, but it serves as a useful example
to show that any datatype can be used in the BYODT framework.
Setup
-----
Since we do not use any 3rdparty library, there is no setup needed.
If you would like to try this with your own datatype library, first bring the library's functions into the process space with ``CDLL``:
.. code-block :: python
ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
"""
######################
# A Simple TVM Program
# --------------------
#
# We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes.
import tvm
from tvm import relay
# Our basic program: Z = X + Y
x = relay.var("x", shape=(3,), dtype="float32")
y = relay.var("y", shape=(3,), dtype="float32")
z = x + y
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
######################################################################
# Now, we create random inputs to feed into this program using numpy:
import numpy as np
np.random.seed(23) # for reproducibility
x_input = np.random.rand(3).astype("float32")
y_input = np.random.rand(3).astype("float32")
print("x: {}".format(x_input))
print("y: {}".format(y_input))
######################################################################
# Finally, we're ready to run the program:
z_output = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output))
######################################################################
# Adding Custom Datatypes
# -----------------------
# Now, we will do the same, but we will use a custom datatype for our intermediate computation.
#
# We use the same input variables ``x`` and ``y`` as above, but before adding ``x + y``, we first cast both ``x`` and ``y`` to a custom datatype via the ``relay.cast(...)`` call.
#
# Note how we specify the custom datatype: we indicate it using the special ``custom[...]`` syntax.
# Additionally, note the "32" after the datatype: this is the bitwidth of the custom datatype. This tells TVM that each instance of ``myfloat`` is 32 bits wide.
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# Trying to generate this program throws an error from TVM.
# TVM does not know how to handle any custom datatype out of the box!
# We first have to register the custom type with TVM, giving it a name and a type code:
tvm.target.datatype.register("myfloat", 150)
######################################################################
# Note that the type code, 150, is currently chosen manually by the user.
# See ``TVMTypeCode::kCustomBegin`` in `include/tvm/runtime/c_runtime_api.h <https://github.com/apache/tvm/blob/main/include/tvm/runtime/data_type.h>`_.
# Now we can generate our program again:
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
module = relay.transform.InferType()(module)
######################################################################
# Now we have a Relay program that uses myfloat!
print(program)
######################################################################
# Now that we can express our program without errors, let's try running it!
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
print("z: {}".format(y_myfloat))
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# Now, trying to compile this program throws an error.
# Let's dissect this error.
#
# The error is occurring during the process of lowering the custom datatype code to code that TVM can compile and run.
# TVM is telling us that it cannot find a *lowering function* for the ``Cast`` operation, when casting from source type 2 (``float``, in TVM), to destination type 150 (our custom datatype).
# When lowering custom datatypes, if TVM encounters an operation over a custom datatype, it looks for a user-registered *lowering function*, which tells it how to lower the operation to an operation over datatypes it understands.
# We have not told TVM how to lower ``Cast`` operations for our custom datatypes; thus, the source of this error.
#
# To fix this error, we simply need to specify a lowering function:
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func(
{
(32, 32): "FloatToCustom32", # cast from float32 to myfloat32
}
),
"Cast",
"llvm",
"float",
"myfloat",
)
######################################################################
# The ``register_op(...)`` call takes a lowering function, and a number of parameters which specify exactly the operation which should be lowered with the provided lowering function.
# In this case, the arguments we pass specify that this lowering function is for lowering a ``Cast`` from ``float`` to ``myfloat`` for target ``"llvm"``.
#
# The lowering function passed into this call is very general: it should take an operation of the specified type (in this case, `Cast`) and return another operation which only uses datatypes which TVM understands.
#
# In the general case, we expect users to implement operations over their custom datatypes using calls to an external library.
# In our example, our ``myfloat`` library implements a ``Cast`` from ``float`` to 32-bit ``myfloat`` in the function ``FloatToCustom32``.
# To provide for the general case, we have made a helper function, ``create_lower_func(...)``,
# which does just this: given a dictionary, it replaces the given operation with a ``Call`` to the appropriate function name provided based on the op and the bit widths.
# It additionally removes usages of the custom datatype by storing the custom datatype in an opaque ``uint`` of the appropriate width; in our case, a ``uint32_t``.
# For more information, see `the source code <https://github.com/apache/tvm/blob/main/python/tvm/target/datatype.py>`_.
# We can now re-try running the program:
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# This new error tells us that the ``Add`` lowering function is not found, which is good news, as it's no longer complaining about the ``Cast``!
# We know what to do from here: we just need to register the lowering functions for the other operations in our program.
#
# Note that for ``Add``, ``create_lower_func`` takes in a dict where the key is an integer.
# For ``Cast`` operations, we require a 2-tuple to specify the ``src_bit_length`` and the ``dest_bit_length``,
# while for all other operations, the bit length is the same between the operands so we only require one integer to specify ``bit_length``.
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Add"}),
"Add",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({(32, 32): "Custom32ToFloat"}),
"Cast",
"llvm",
"myfloat",
"float",
)
# Now, we can run our program without errors.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
print("x:\t\t{}".format(x_input))
print("y:\t\t{}".format(y_input))
print("z (float32):\t{}".format(z_output))
print("z (myfloat32):\t{}".format(z_output_myfloat))
# Perhaps as expected, the ``myfloat32`` results and ``float32`` are exactly the same!
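# As an added sanity check (mirroring the model-level comparison at the end of this
# tutorial), we can assert that the two results match exactly; this line is an
# illustrative addition and assumes z_output holds the native float32 result computed above.
np.testing.assert_array_equal(z_output, z_output_myfloat)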
######################################################################
# Running Models With Custom Datatypes
# ------------------------------------
#
# We will first choose the model which we would like to run with myfloat.
# In this case we use `Mobilenet <https://arxiv.org/abs/1704.04861>`_.
# We choose Mobilenet due to its small size.
# In this alpha state of the Bring Your Own Datatypes framework, we have not implemented any software optimizations for running software emulations of custom datatypes; the result is poor performance due to many calls into our datatype emulation library.
#
# First let us define two helper functions to get the mobilenet model and a cat image.
def get_mobilenet():
dshape = (1, 3, 224, 224)
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
return relay.frontend.from_mxnet(block, shape_dict)
def get_cat_image():
from tvm.contrib.download import download_testdata
from PIL import Image
url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
dst = "cat.png"
real_dst = download_testdata(url, dst, module="data")
img = Image.open(real_dst).resize((224, 224))
# CoreML's standard model image format is BGR
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
return np.asarray(img, dtype="float32")
module, params = get_mobilenet()
######################################################################
# It's easy to execute MobileNet with native TVM:
ex = tvm.relay.create_executor("graph", mod=module, params=params)
input = get_cat_image()
result = ex.evaluate()(input).numpy()
# print first 10 elements
print(result.flatten()[:10])
######################################################################
# Now, we would like to change the model to use myfloat internally. To do so, we need to convert the network; we first define a function which will help us convert tensors:
def convert_ndarray(dst_dtype, array):
"""Converts an NDArray into the specified datatype"""
x = relay.var("x", shape=array.shape, dtype=str(array.dtype))
cast = relay.Function([x], x.astype(dst_dtype))
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
return relay.create_executor("graph").evaluate(cast)(array)
######################################################################
# Now, to actually convert the entire network, we have written `a pass in Relay <https://github.com/gussmith23/tvm/blob/ea174c01c54a2529e19ca71e125f5884e728da6e/python/tvm/relay/frontend/change_datatype.py#L21>`_ which simply converts all nodes within the model to use the new datatype.
from tvm.relay.frontend.change_datatype import ChangeDatatype
src_dtype = "float32"
dst_dtype = "custom[myfloat]32"
module = relay.transform.InferType()(module)
# Currently, custom datatypes only work if you run simplify_inference beforehand
module = tvm.relay.transform.SimplifyInference()(module)
# Run type inference before changing datatype
module = tvm.relay.transform.InferType()(module)
# Change datatype from float to myfloat and re-infer types
cdtype = ChangeDatatype(src_dtype, dst_dtype)
expr = cdtype.visit(module["main"])
module = tvm.relay.transform.InferType()(module)
# We also convert the parameters:
params = {k: convert_ndarray(dst_dtype, v) for k, v in params.items()}
# We also need to convert our input:
input = convert_ndarray(dst_dtype, input)
# Finally, we can try to run the converted model:
try:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = tvm.relay.create_executor("graph", mod=module).evaluate(expr)(
input, **params
)
except tvm.TVMError as e:
print(str(e).split("\n")[-1])
######################################################################
# When we attempt to run the model, we get a familiar error telling us that more functions need to be registered for myfloat.
#
# Because this is a neural network, many more operations are required.
# Here, we register all the needed functions:
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "FloatToCustom32"}),
"FloatImm",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else"
)
tvm.target.datatype.register_op(
tvm.target.datatype.lower_call_pure_extern,
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.call_pure_extern",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Mul"}),
"Mul",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Div"}),
"Div",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Sqrt"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sqrt",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Sub"}),
"Sub",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Exp"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.exp",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Max"}),
"Max",
"llvm",
"myfloat",
)
tvm.target.datatype.register_min_func(
tvm.target.datatype.create_min_lower_func({32: "MinCustom32"}, "myfloat"),
"myfloat",
)
######################################################################
# Note we are making use of two new functions: ``register_min_func`` and ``create_min_lower_func``.
#
# ``register_min_func`` takes in an integer ``num_bits`` for the bit length, and should return an operation
# representing the minimum finite representable value for the custom data type with the specified bit length.
#
# Similar to ``register_op`` and ``create_lower_func``, the ``create_min_lower_func`` handles the general case
# where the minimum representable custom datatype value is implemented using calls to an external library.
#
# Now we can finally run the model:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = relay.create_executor(mod=module).evaluate(expr)(input, **params)
result_myfloat = convert_ndarray(src_dtype, result_myfloat).numpy()
# print first 10 elements
print(result_myfloat.flatten()[:10])
# Again, note that the output using 32-bit myfloat is exactly the same as the 32-bit float output,
# because myfloat is exactly a float!
np.testing.assert_array_equal(result, result_myfloat)
|
the-stack_0_9655 | """
A script that prints information about the export table of a PE file.
For each exported function, it prints the function name and its RVA. It also prints
the total number of exported functions.
Usage example:
python get_export_info.py d:/file.exe
"""
import sys
import pefile
try:
file_path = sys.argv[1]
except IndexError:
    print('No file specified.')
sys.exit(0)
try:
pe = pefile.PE(file_path)
except FileNotFoundError:
    print('Cannot find the specified file:', sys.argv[1])
sys.exit(0)
except pefile.PEFormatError:
    print('File', sys.argv[1], 'is not a Windows PE file.')
sys.exit(0)
if hasattr(pe, 'DIRECTORY_ENTRY_EXPORT'):
    print('Library:', pe.DIRECTORY_ENTRY_EXPORT.name.decode('utf-8'))
    for export_entry in pe.DIRECTORY_ENTRY_EXPORT.symbols:
        print('\t' + export_entry.name.decode('utf-8'))
        print('\t\tOrdinal:', str(hex(export_entry.ordinal)))
        print('\t\tFunction RVA:', str(hex(export_entry.address)))
    print('Total exported functions:', len(pe.DIRECTORY_ENTRY_EXPORT.symbols))
else:
    print('File', sys.argv[1], 'does not contain an export section.')
|
the-stack_0_9658 | import time
import cv2
class DetectHumanMovement(object):
def __init__(self):
self.faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
self.handCascade = cv2.CascadeClassifier("haarcascade_hand_default.xml")
self.video_capture = cv2.VideoCapture(0)
while True:
if not self.video_capture.isOpened():
print('Unable to load camera.')
time.sleep(2)
continue
else:
break
# To capture in main game loop
def capture_gray_image(self):
retval, frame = self.video_capture.read()
self.frame = frame
if not retval:
            raise Exception('Oops! Capture image failed.')
# convert to gray scale
self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
return self.gray
# detect all faces in gray image
def detect_faces(self):
faces = self.faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(35, 35)
)
return faces
# detect all fist
def detect_fists(self):
fists = self.handCascade.detectMultiScale(
self.gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(35, 35)
)
return fists
"""
Define Face Horizontal Orientation (Left or Right)
window (ex = 0 to WIDTH)
---------------x'-------
-----x'-----------------
x' <- 500 and
x' <- 300
p = x' - (WIDTH / 2)
"""
def face_laterality_orientation(self, face, width):
(x, y, w, h) = face
orientation = int((x + (w / 2)) - (width / 2))
return orientation
"""
Check Fist to shoot missile
"""
def fist_check(self):
fists = self.detect_fists()
return len(fists)
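
# A minimal usage sketch (an illustrative addition, not part of the original class):
# it assumes the two Haar cascade XML files referenced above sit next to this script
# and that a webcam is available at index 0.
if __name__ == '__main__':
    detector = DetectHumanMovement()
    while True:
        gray = detector.capture_gray_image()
        for (x, y, w, h) in detector.detect_faces():
            # positive orientation means the face sits to the right of the frame centre
            orientation = detector.face_laterality_orientation((x, y, w, h), gray.shape[1])
            cv2.rectangle(detector.frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            print('face orientation:', orientation)
        if detector.fist_check():
            print('fist detected - shoot missile')
        cv2.imshow('frame', detector.frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    detector.video_capture.release()
    cv2.destroyAllWindows()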
|
the-stack_0_9659 | import os
import time
import torch
import torch.nn as nn
from model.utils.general import init_dir, get_logger
class BaseModel(object):
"""Generic class for our model
Usage:
1. init
2. build_train() or build_pred()
3. save and restore
4. train and evaluate
"""
# 1. init
def __init__(self, config, dir_output):
"""Defines self._config
Args:
config: (Config instance) class with hyper parameters, from "model.json"
dir_output: output dir
"""
self._config = config
self._dir_output = dir_output
self._init_relative_path(dir_output)
self.logger = get_logger(dir_output + "model.log")
def _init_relative_path(self, dir_output):
# init parent dir
init_dir(dir_output)
# 1. init child dir
# check dir one last time
self._dir_model = dir_output + "model_weights/"
init_dir(self._dir_model)
# 2. define model path
self._model_path = self._dir_model + "model.cpkt"
# 2. build
def build_train(self, config=None):
"""To overwrite with model-specific logic
This logic must define
- self.model_name
- self.loss
- self.lr
- etc.
Args:
config: from "training.json" and "model.json"
"""
self.logger.info("- Building model...")
self._init_model(config.model_name, config.device)
self._init_optimizer(config.lr_method, config.lr_init)
self._init_scheduler(config.lr_scheduler)
self._init_criterion(config.criterion_method)
self.logger.info("- done.")
def build_pred(self, config=None):
"""Similar to build_train but no need to define train_op
Args:
config: from "model.json"
"""
self.logger.info("- Building model...")
self._init_model(config.model_name, config.device)
self.logger.info("- done.")
def _init_model(self, model_name="CNN", device="cpu"):
self.logger.info(" - " + model_name)
self.logger.info(" - " + device)
self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
self.model = self.getModel(model_name)
self.model = self.model.to(self.device)
def _init_optimizer(self, lr_method="adam", lr=1e-3):
"""Defines self.optimizer that performs an update on a batch
Args:
lr_method: (string) sgd method, for example "adam"
lr: init learning rate (initial value)
"""
# 1. optimizer
_lr_m = lr_method.lower() # lower to make sure
print(" - " + _lr_m)
self.optimizer = self.getOptimizer(_lr_m, lr)
def _init_scheduler(self, lr_scheduler="CosineAnnealingLR"):
"""Defines self.scheduler that performs an update on a batch
Args:
lr_scheduler: (string) learning rate schedule method, for example "CosineAnnealingLR"
"""
# 2. scheduler
print(" - lr_scheduler " + lr_scheduler)
self.scheduler = self.getLearningRateScheduler(lr_scheduler)
def _init_criterion(self, criterion_method="CrossEntropyLoss"):
"""Defines self.criterion that performs an update on a batch
Args:
criterion_method: (string) criterion method, for example "CrossEntropyLoss"
"""
# 3. criterion
print(" - " + criterion_method)
self.criterion = self.getCriterion(criterion_method)
# ! MUST OVERWRITE
def getModel(self, model_name="CNN"):
"""return your Model
Args:
model_name: String, from "model.json"
Returns:
your model that inherits from torch.nn
"""
raise NotImplementedError("return your model ({}) that inherits from torch.nn".format(model_name))
def getOptimizer(self, lr_method="adam", lr=1e-3):
if lr_method == 'adam':
return torch.optim.Adam(self.model.parameters(), lr=lr)
elif lr_method == 'adamax':
return torch.optim.Adamax(self.model.parameters(), lr=lr)
elif lr_method == 'sgd':
return torch.optim.SGD(self.model.parameters(), lr=lr)
else:
raise NotImplementedError("Unknown Optimizer {}".format(lr_method))
def getLearningRateScheduler(self, lr_scheduler="CosineAnnealingLR"):
if lr_scheduler == "CosineAnnealingLR":
return torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=5, eta_min=4e-08)
else:
raise NotImplementedError("Unknown Learning Rate Scheduler {}".format(lr_scheduler))
def getCriterion(self, criterion_method="CrossEntropyLoss"):
if criterion_method == 'CrossEntropyLoss':
return torch.nn.CrossEntropyLoss()
elif criterion_method == 'MSELoss':
return torch.nn.MSELoss()
elif criterion_method == 'BCEWithLogitsLoss':
return torch.nn.BCEWithLogitsLoss()
else:
raise NotImplementedError("Unknown Criterion Method {}".format(criterion_method))
# 3. save and restore
def auto_restore(self):
if os.path.exists(self._model_path) and os.path.isfile(self._model_path):
self.restore()
def restore(self, model_path=None, map_location='cpu'):
"""Reload weights into session
Args:
model_path: weights path "model_weights/model.cpkt"
map_location: 'cpu' or 'gpu:0'
"""
self.logger.info("- Reloading the latest trained model...")
if model_path == None:
self.model.load_state_dict(torch.load(self._model_path, map_location=map_location))
else:
self.model.load_state_dict(torch.load(model_path, map_location=map_location))
def save(self):
"""Saves model"""
self.logger.info("- Saving model...")
torch.save(self.model.state_dict(), self._model_path)
self.logger.info("- Saved model in {}".format(self._dir_model))
# 4. train and evaluate
def train(self, config, train_set, val_set, lr_schedule, path_label):
"""Global training procedure
Calls method self.run_epoch and saves weights if score improves.
All the epoch-logic including the lr_schedule update must be done in
self.run_epoch
Args:
config: Config instance contains params as attributes
train_set: Dataset instance
val_set: Dataset instance
lr_schedule: LRSchedule instance that takes care of learning proc
path_label: dataframe
Returns:
best_score: (float)
"""
best_score = None
for epoch in range(config.n_epochs):
# logging
tic = time.time()
self.logger.info("Epoch {:}/{:}".format(epoch + 1, config.n_epochs))
# epoch
score = self._run_train_epoch(config, train_set, val_set, epoch, lr_schedule, path_label)
# save weights if we have new best score on eval
if best_score is None or score >= best_score: # abs(score-0.5) <= abs(best_score-0.5):
best_score = score
self.logger.info("- New best score ({:04.2f})!".format(best_score))
self.save()
if lr_schedule.stop_training:
self.logger.info("- Early Stopping.")
break
# logging
toc = time.time()
self.logger.info("- Elapsed time: {:04.2f}, learning rate: {:04.5f}".format(toc - tic, lr_schedule.lr))
return best_score
def evaluate(self, config, test_set, path_label):
"""Evaluates model on test set
Calls method run_evaluate on test_set and takes care of logging
Args:
config: Config
test_set: instance of class Dataset
path_label: dataframe
Return:
scores: (dict) scores["acc"] = 0.85 for instance
"""
self.logger.info("- Evaluating...")
scores = self._run_evaluate_epoch(config, test_set, path_label) # evaluate
msg = " ... ".join([" {} is {:04.2f} ".format(k, v) for k, v in scores.items()])
self.logger.info("- Eval: {}".format(msg))
return scores
def _auto_backward(self, loss):
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ! MUST OVERWRITE
    def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule, path_label):
"""Model_specific method to overwrite
Performs an epoch of training
Args:
config: Config
train_set: Dataset instance
val_set: Dataset instance
epoch: (int) id of the epoch, starting at 0
lr_schedule: LRSchedule instance that takes care of learning proc
Returns:
score: (float) model will select weights that achieve the highest score
Alert:
you can use the method below to simplify your code
_auto_backward(self, loss)
"""
raise NotImplementedError("Performs an epoch of training")
# ! MUST OVERWRITE
    def _run_evaluate_epoch(self, config, test_set, path_label):
"""Model-specific method to overwrite
Performs an epoch of evaluation
Args:
config: Config
test_set: Dataset instance
Returns:
scores: (dict) scores["acc"] = 0.85 for instance
"""
raise NotImplementedError("Performs an epoch of evaluation")
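
# A minimal sketch of a concrete subclass (illustrative only; the class name and the
# tiny network used here are hypothetical). It shows which hooks must be overwritten
# for the train()/evaluate() loops above to work.
class SimpleModel(BaseModel):
    def getModel(self, model_name="CNN"):
        # any torch.nn.Module works; a tiny MLP keeps the sketch self-contained
        return nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))

    def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule, path_label):
        self.model.train()
        for images, labels in train_set:
            images, labels = images.to(self.device), labels.to(self.device)
            loss = self.criterion(self.model(images), labels)
            self._auto_backward(loss)
        self.scheduler.step()
        return self.evaluate(config, val_set, path_label)["acc"]

    def _run_evaluate_epoch(self, config, test_set, path_label):
        self.model.eval()
        correct = total = 0
        with torch.no_grad():
            for images, labels in test_set:
                images, labels = images.to(self.device), labels.to(self.device)
                correct += (self.model(images).argmax(dim=1) == labels).sum().item()
                total += labels.size(0)
        return {"acc": correct / max(total, 1)}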
|
the-stack_0_9663 | import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='yanchor', parent_name='mesh3d.colorbar', **kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'style'),
values=kwargs.pop('values', ['top', 'middle', 'bottom']),
**kwargs
)
|
the-stack_0_9665 | from __future__ import print_function
import numpy as np
import pytest
def pytest_runtest_setup(item):
seed = np.random.randint(1000)
print("Seed used in np.random.seed(): %d" % seed)
np.random.seed(seed)
def pytest_addoption(parser):
parser.addoption(
"--block",
action="store",
default=True,
help="Whether the plotting should block execution."
)
@pytest.fixture
def block(request):
try:
return request.config.getoption("--block") not in "False,false,no,0".split(",")
except ValueError:
return True
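
# Usage note (illustrative): tests that display plots can request the `block` fixture
# and use its value, e.g. plt.show(block=block); running `pytest --block=False` makes
# the fixture return False so figures do not block the test run.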
|
the-stack_0_9666 | from rest_framework import status
from rest_framework.reverse import reverse
from tests.test_profile.test_quota.base_test_quota import BaseTestQuota
from tests.utils import check_data_in_dict
class TestApiQuotaCreate(BaseTestQuota):
def setUp(self):
super(TestApiQuotaCreate, self).setUp()
self.post_data = {
'name': "My new quota",
'attribute_definitions': [self.memory_attributes.first().id, self.cpu_attributes.first().id]
}
self.create_quota_url = reverse('api_quota_list_create')
def _create_quota(self):
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
check_data_in_dict(self, [self.post_data], [response.data])
def _create_quota_failed(self, status_error=status.HTTP_400_BAD_REQUEST):
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status_error)
def test_admin_post_quota(self):
self._create_quota()
def test_cannot_post_quota_with_existing_name(self):
self._create_quota()
self._create_quota_failed()
def test_customer_cannot_post_quota(self):
self.client.force_login(user=self.standard_user)
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_cannot_post_quota_when_logout(self):
self.client.logout()
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
the-stack_0_9671 | from flask.ext.classy import FlaskView, route
from flask import render_template, redirect, url_for, flash
from flask_menu.classy import classy_menu_item
from flask_login import current_user, login_required
from Application.models import User
from Application.models import Project
from .forms import UserEditForm
from Application import db
from speaklater import make_lazy_string
@make_lazy_string
def account_text():
if current_user.is_authenticated:
return "Account ({})".format(current_user.fullname)
return "Account"
def show_menu():
return current_user.is_authenticated
class Profile(FlaskView):
route_base = '/profile'
@classy_menu_item('frontend-right.account', account_text, visible_when=show_menu, order=1)
@classy_menu_item('frontend-right.account.profile', 'My Profile', order=0)
@login_required
def index(self):
return redirect(url_for('.Profile:me'))
@login_required
@route('/me/')
def me(self):
user = current_user
projects = None
if user.projects.count():
projects = user.projects.order_by(Project.date_uploaded.desc())
following = False
if current_user.is_authenticated:
following = current_user.following.filter_by(zid=current_user.zid).count() != 0
return render_template(
'.profile/index.html',
user=user, following=following, projects=projects)
@route('/<string:user_id>/')
def user(self, user_id):
user = User.query.filter(User.zid == user_id).first_or_404()
projects = None
if user.projects.count():
projects = user.projects.order_by(Project.date_uploaded.desc())
following = False
if current_user.is_authenticated:
following = current_user.following.filter_by(zid=user_id).count() != 0
return render_template(
'.profile/index.html',
user=user,
following=following,
projects=projects)
@login_required
@route('/edit/', methods=['GET', 'POST'])
def edit(self):
form = UserEditForm(obj=current_user)
if form.submit.data and form.validate_on_submit:
# update the user's details
current_user.website = form.website.data
current_user.github_username = form.github_username.data
current_user.email = form.email.data
current_user.about = form.about.data
db.session.add(current_user)
db.session.commit()
            flash('Successfully updated your details!', 'success')
return redirect(url_for('.Profile:me'))
return render_template(
".profile/edit_user.html",
is_form=True, form=form, user=current_user)
@login_required
@route('/follow/<string:user_id>/')
def follow(self, user_id):
following = current_user.following.filter_by(zid=user_id).count()
if user_id == current_user.zid:
flash("Error: you cannot follow yourself", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
if following:
flash("Error: you already follow this user", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
# Add follower relationship here
followee = User.query.get_or_404(user_id)
current_user.following.append(followee)
db.session.add(current_user)
db.session.commit()
flash("User followed successfully", 'success')
return redirect(url_for('.Profile:user', user_id=user_id))
@login_required
@route('/unfollow/<string:user_id>/')
def unfollow(self, user_id):
following = current_user.following.filter_by(zid=user_id).count()
if user_id == current_user.zid:
flash("Error: you cannot unfollow yourself", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
if following:
# Remove relationship here
followee = User.query.get_or_404(user_id)
current_user.following.remove(followee)
db.session.add(current_user)
db.session.commit()
flash("User unfollowed successfully", 'success')
return redirect(url_for('.Profile:user', user_id=user_id))
flash("Error: you don't follow this user", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
|
the-stack_0_9672 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Shreyas Srish (@shrsr) <[email protected]>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_bd_dhcp_policy
short_description: Manage BD DHCP Policy in schema templates
description:
- Manage BD DHCP policies in schema templates on Cisco ACI Multi-Site.
author:
- Shreyas Srish (@shrsr)
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template to change.
type: str
required: yes
bd:
description:
- The name of the BD to manage.
type: str
required: yes
dhcp_policy:
description:
- The DHCP Policy
type: str
aliases: [ name ]
version:
description:
- The version of DHCP Relay Policy.
type: int
dhcp_option_policy:
description:
- The DHCP Option Policy.
type: dict
suboptions:
name:
description:
- The name of the DHCP Option Policy.
type: str
required: yes
version:
description:
- The version of the DHCP Option Policy.
type: int
required: yes
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- This module can only be used on versions of MSO that are 3.1.1h or greater.
extends_documentation_fragment: cisco.mso.modules
'''
EXAMPLES = r'''
- name: Add a new DHCP policy to a BD
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
name: ansible_test
version: 1
dhcp_option_policy:
name: ansible_test_option
version: 1
state: present
delegate_to: localhost
- name: Remove a DHCP policy from a BD
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
name: ansible_test
version: 1
state: absent
delegate_to: localhost
- name: Query a specific BD DHCP Policy
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
name: ansible_test
state: query
delegate_to: localhost
register: query_result
- name: Query all BD DHCP Policies
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.mso.plugins.module_utils.mso import MSOModule, mso_argument_spec, mso_dhcp_option_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
bd=dict(type='str', required=True),
dhcp_policy=dict(type='str', aliases=['name']),
version=dict(type='int'),
dhcp_option_policy=dict(type='dict', options=mso_dhcp_option_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dhcp_policy']],
['state', 'present', ['dhcp_policy', 'version']],
],
)
schema = module.params.get('schema')
template = module.params.get('template').replace(' ', '')
bd = module.params.get('bd')
dhcp_policy = module.params.get('dhcp_policy')
dhcp_option_policy = module.params.get('dhcp_option_policy')
version = module.params.get('version')
state = module.params.get('state')
mso = MSOModule(module)
# Get schema
schema_id, schema_path, schema_obj = mso.query_schema(schema)
# Get template
templates = [t.get('name') for t in schema_obj.get('templates')]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
# Get BD
bds = [b.get('name') for b in schema_obj.get('templates')[template_idx]['bds']]
if bd not in bds:
mso.fail_json(msg="Provided BD '{0}' does not exist. Existing BDs: {1}".format(bd, ', '.join(bds)))
bd_idx = bds.index(bd)
# Check if DHCP policy already exists
if dhcp_policy:
check_policy = mso.get_obj("policies/dhcp/relay", name=dhcp_policy, key="DhcpRelayPolicies")
        if not check_policy:
            mso.fail_json(msg="DHCP policy '{dhcp_policy}' does not exist".format(dhcp_policy=dhcp_policy))
# Check if DHCP option policy already exists
if dhcp_option_policy:
check_option_policy = mso.get_obj("policies/dhcp/option", name=dhcp_option_policy.get('name'), key="DhcpRelayPolicies")
        if not check_option_policy:
            mso.fail_json(msg="DHCP option policy '{dhcp_option_policy}' does not exist".format(dhcp_option_policy=dhcp_option_policy.get('name')))
# Get DHCP policies
dhcp_policies = [s.get('name') for s in schema_obj.get('templates')[template_idx]['bds'][bd_idx]['dhcpLabels']]
if dhcp_policy in dhcp_policies:
dhcp_idx = dhcp_policies.index(dhcp_policy)
# FIXME: Changes based on index are DANGEROUS
dhcp_policy_path = '/templates/{0}/bds/{1}/dhcpLabels/{2}'.format(template, bd, dhcp_idx)
mso.existing = schema_obj.get('templates')[template_idx]['bds'][bd_idx]['dhcpLabels'][dhcp_idx]
if state == 'query':
if dhcp_policy is None:
mso.existing = schema_obj.get('templates')[template_idx]['bds'][bd_idx]['dhcpLabels']
elif not mso.existing:
mso.fail_json(msg="DHCP policy not associated with the bd")
mso.exit_json()
dhcp_policy_paths = '/templates/{0}/bds/{1}/dhcpLabels'.format(template, bd)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=dhcp_policy_path))
elif state == 'present':
payload = dict(
name=dhcp_policy,
version=version,
dhcpOptionLabel=dhcp_option_policy,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=dhcp_policy_path, value=mso.sent))
else:
ops.append(dict(op='add', path=dhcp_policy_paths + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
the-stack_0_9674 | #
# POC FTP Browser for Enigma2
#
# for localized messages
from . import _
# Config
from Components.config import config, ConfigInteger, ConfigSubList, \
ConfigSubsection, ConfigText, ConfigPassword, ConfigYesNo
config.plugins.ftpbrowser = ConfigSubsection()
config.plugins.ftpbrowser.server = ConfigSubList()
config.plugins.ftpbrowser.servercount = ConfigInteger(0)
i = 0
append = config.plugins.ftpbrowser.server.append
while i < config.plugins.ftpbrowser.servercount.value:
newServer = ConfigSubsection()
append(newServer)
newServer.name = ConfigText("Name", fixed_size=False)
newServer.address = ConfigText("192.168.2.12", fixed_size=False)
newServer.username = ConfigText("root", fixed_size=False)
newServer.password = ConfigPassword("dreambox")
newServer.port = ConfigInteger(21, (1, 65535))
newServer.passive = ConfigYesNo(False)
i += 1
del newServer
del append, i
from FTPBrowser import FTPBrowser
from FTPServerManager import ftpserverFromURI
ftpbrowser = None
def createSingleton(session):
global ftpbrowser
if not ftpbrowser:
ftpbrowser = session.instantiateDialog(FTPBrowser)
return False
return True
def main(session, **kwargs):
createSingleton(session)
session.execDialog(ftpbrowser)
def filescan_chosen(session, item):
if item:
createSingleton(session)
ftpbrowser.connect(ftpserverFromURI(item[1], save = False))
session.execDialog(ftpbrowser)
def filescan_open_connected(res, items, session, **kwargs):
if res:
ftpbrowser.disconnect()
filescan_open(items, session, **kwargs)
def filescan_open(items, session, **kwargs):
if createSingleton(session) and ftpbrowser.ftpclient:
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
session.openWithCallback(
boundFunction(filescan_open_connected, items, session, **kwargs),
MessageBox,
_("There already is an active connection.\nDo you want to abort it?"),
type = MessageBox.TYPE_YESNO
)
return
Len = len(items)
if Len > 1:
from Screens.ChoiceBox import ChoiceBox
from Tools.BoundFunction import boundFunction
session.openWithCallback(
boundFunction(filescan_chosen, session),
ChoiceBox,
_("Which server do you want to connect to?"),
[(item, item) for item in items]
)
elif Len:
        filescan_chosen(session, (items[0], items[0]))
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
# Overwrite checkFile to detect remote files
class RemoteScanner(Scanner):
def checkFile(self, file):
return file.path.startswith("ftp://")
return [
RemoteScanner(
mimetypes = None,
paths_to_scan =
(
ScanPath(path = "", with_subdirs = False),
),
name = "Connect",
description = _("Connect to FTP..."),
openfnc = filescan_open,
),
]
def Plugins(**kwargs):
from Plugins.Plugin import PluginDescriptor
return [
PluginDescriptor(
name="FTPBrowser",
description = _("A basic FTP client"),
where = PluginDescriptor.WHERE_PLUGINMENU,
icon = "plugin.png",
fnc = main,
needsRestart = False
),
PluginDescriptor(
name = "FTPBrowser",
where = PluginDescriptor.WHERE_FILESCAN,
fnc = filescan,
needsRestart = False,
),
]
|
the-stack_0_9675 | from __future__ import absolute_import
import os
import re
import json
import base64
import inspect
import requests
import mimetypes
from contextlib import contextmanager
from datetime import datetime, timedelta
from django.conf import settings
from django.db import transaction
from pytz import utc
from random import randint
from six import StringIO
# Do not import from sentry here! Bad things will happen
optional_group_matcher = re.compile(r'\(\?\:([^\)]+)\)')
named_group_matcher = re.compile(r'\(\?P<(\w+)>[^\)]+\)')
non_named_group_matcher = re.compile(r'\([^\)]+\)')
# [foo|bar|baz]
either_option_matcher = re.compile(r'\[([^\]]+)\|([^\]]+)\]')
camel_re = re.compile(r'([A-Z]+)([a-z])')
API_PREFIX = '/api/0/'
scenarios = {}
def simplify_regex(pattern):
"""Clean up urlpattern regexes into something somewhat readable by
Mere Humans: turns something like
"^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$" into
"{sport_slug}/athletes/{athlete_slug}/"
"""
pattern = optional_group_matcher.sub(lambda m: '[%s]' % m.group(1), pattern)
# handle named groups first
pattern = named_group_matcher.sub(lambda m: '{%s}' % m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("{var}", pattern)
# handle optional params
pattern = either_option_matcher.sub(lambda m: m.group(1), pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '') \
.replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
def get_internal_endpoint_from_pattern(pattern):
from sentry.api.base import Endpoint
if not hasattr(pattern, 'callback'):
return
if hasattr(pattern.callback, 'cls'):
cls = pattern.callback.cls
if issubclass(cls, Endpoint):
return cls
elif hasattr(pattern.callback, 'cls_instance'):
inst = pattern.callback.cls_instance
if isinstance(inst, Endpoint):
return inst.__class__
def extract_documentation(func):
doc = inspect.getdoc(func)
if doc is not None:
return doc.decode('utf-8')
def get_endpoint_path(internal_endpoint):
return '%s.%s' % (internal_endpoint.__module__, internal_endpoint.__name__, )
def extract_title_and_text(doc):
title = None
iterable = iter((doc or u'').splitlines())
clean_end = False
for line in iterable:
line = line.strip()
if title is None:
if not line:
continue
title = line
elif line[0] * len(line) == line:
clean_end = True
break
else:
break
lines = []
if clean_end:
for line in iterable:
if line.strip():
lines.append(line)
break
lines.extend(iterable)
return title, lines
def camelcase_to_dashes(string):
def handler(match):
camel, regular = match.groups()
if len(camel) != 1:
camel = camel[:-1].lower() + '-' + camel[-1].lower()
else:
camel = camel.lower()
return '-' + camel + regular.lower()
return camel_re.sub(handler, string).lstrip('-')
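# Illustrative examples (hypothetical endpoint names, added for clarity):
#   camelcase_to_dashes("GetProjectDetails") -> "get-project-details"
#   camelcase_to_dashes("ListAPITokens")     -> "list-api-tokens"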
def extract_endpoint_info(pattern, internal_endpoint):
path = simplify_regex(pattern.regex.pattern)
from sentry.constants import HTTP_METHODS
for method_name in HTTP_METHODS:
if method_name in ('HEAD', 'OPTIONS'):
continue
method = getattr(internal_endpoint, method_name.lower(), None)
if method is None:
continue
doc = extract_documentation(method)
if doc is None:
continue
section = getattr(internal_endpoint, 'doc_section', None)
if section is None:
continue
endpoint_name = method.__name__.title() + internal_endpoint.__name__
if endpoint_name.endswith('Endpoint'):
endpoint_name = endpoint_name[:-8]
endpoint_name = camelcase_to_dashes(endpoint_name)
title, text = extract_title_and_text(doc)
yield dict(
path=API_PREFIX + path.lstrip('/'),
method=method_name,
title=title,
text=text,
scenarios=getattr(method, 'api_scenarios', None) or [],
section=section.name.lower(),
internal_path='%s:%s' % (get_endpoint_path(internal_endpoint), method.__name__),
endpoint_name=endpoint_name,
)
def iter_endpoints():
from sentry.api.urls import urlpatterns
for pattern in urlpatterns:
internal_endpoint = get_internal_endpoint_from_pattern(pattern)
if internal_endpoint is None:
continue
for endpoint in extract_endpoint_info(pattern, internal_endpoint):
yield endpoint
def scenario(ident):
def decorator(f):
if ident in scenarios:
raise RuntimeError('Scenario duplicate: %s' % ident)
scenarios[ident] = f
f.api_scenario_ident = ident
return f
return decorator
def attach_scenarios(scenarios):
def decorator(f):
f.api_scenarios = [x.api_scenario_ident for x in scenarios]
return f
return decorator
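
# Illustrative usage sketch (the scenario and endpoint names below are hypothetical):
# a scenario function records example requests through the Runner defined further down,
# and an endpoint method opts into it via attach_scenarios, e.g.:
#
#   @scenario('ListYourProjects')
#   def list_your_projects_scenario(runner):
#       runner.request(method='GET', path='/projects/')
#
#   class ProjectIndexEndpoint(Endpoint):
#       @attach_scenarios([list_your_projects_scenario])
#       def get(self, request):
#           ...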
def iter_scenarios():
# Make sure everything is imported.
for endpoint in iter_endpoints():
pass
return iter(sorted(scenarios.items()))
def get_sections():
from sentry.api.base import DocSection
return dict((x.name.lower(), x.value) for x in DocSection)
def create_sample_time_series(event):
from sentry.app import tsdb
group = event.group
now = datetime.utcnow().replace(tzinfo=utc)
for _ in range(60):
count = randint(1, 10)
tsdb.incr_multi(
((tsdb.models.project, group.project.id), (tsdb.models.group, group.id), ), now, count
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_received, group.project.organization_id),
(tsdb.models.project_total_received, group.project.id),
), now, int(count * 1.1)
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_rejected, group.project.organization_id),
(tsdb.models.project_total_rejected, group.project.id),
), now, int(count * 0.1)
)
now = now - timedelta(seconds=1)
for _ in range(24 * 30):
count = randint(100, 1000)
tsdb.incr_multi(
((tsdb.models.project, group.project.id), (tsdb.models.group, group.id), ), now, count
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_received, group.project.organization_id),
(tsdb.models.project_total_received, group.project.id),
), now, int(count * 1.1)
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_rejected, group.project.organization_id),
(tsdb.models.project_total_rejected, group.project.id),
), now, int(count * 0.1)
)
now = now - timedelta(hours=1)
class MockUtils(object):
def create_user(self, mail):
from sentry.models import User
user, _ = User.objects.get_or_create(
username=mail, defaults={
'email': mail,
}
)
user.set_password('dummy')
user.save()
return user
def create_org(self, name, owner):
from sentry.models import Organization, OrganizationMember
org, _ = Organization.objects.get_or_create(
name=name,
)
dummy_member, _ = OrganizationMember.objects.get_or_create(
user=owner, organization=org, defaults={
'role': 'member',
}
)
return org
def create_api_key(self, org, label='Default'):
from sentry.models import ApiKey
return ApiKey.objects.get_or_create(
organization=org,
label=label,
scopes=(1 << len(ApiKey.scopes.keys())) - 1,
)[0]
def create_client_key(self, project, label='Default'):
from sentry.models import ProjectKey
return ProjectKey.objects.get_or_create(project=project, label=label)[0]
def create_team(self, name, org):
from sentry.models import Team
return Team.objects.get_or_create(
name=name,
defaults={
'organization': org,
},
)[0]
def create_project(self, name, teams, org):
from sentry.models import Project
project = Project.objects.get_or_create(
name=name, defaults={
'organization': org,
}
)[0]
for team in teams:
project.add_team(team)
return project
def create_release(self, project, user, version=None):
from sentry.models import Release, Activity
if version is None:
version = os.urandom(20).encode('hex')
with transaction.atomic():
release = Release.objects.filter(
version=version, organization_id=project.organization_id, projects=project
).first()
if not release:
release = Release.objects.filter(
version=version,
organization_id=project.organization_id,
).first()
if not release:
release = Release.objects.create(
version=version,
organization_id=project.organization_id,
)
release.add_project(project)
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=Activity.get_version_ident(version),
user=user,
data={'version': version},
)
return release
def create_release_file(self, project, release, path, content_type=None, contents=None):
from sentry.models import File, ReleaseFile
if content_type is None:
content_type = mimetypes.guess_type(path)[0] or 'text/plain'
if content_type.startswith('text/'):
content_type += '; encoding=utf-8'
f = File.objects.create(
name=path.rsplit('/', 1)[-1],
type='release.file',
headers={'Content-Type': content_type},
)
f.putfile(StringIO(contents or ''))
return ReleaseFile.objects.create(
organization_id=project.organization_id, release=release, file=f, name=path
)
def create_event(self, project, release, platform='python', raw=True):
from sentry.utils.samples import create_sample_event
event = create_sample_event(
project=project, platform=platform, release=release.version, raw=raw
)
create_sample_time_series(event)
return event
class Runner(object):
"""The runner is a special object that holds state for the automatic
running of example scenarios. It gets created by api-docs/generator.py
which does the majority of the heavy lifting. It mainly exists here
so that the scenarios can be run separately if needed.
"""
def __init__(self, ident, func, api_key, org, me, teams=None):
self.ident = ident
self.func = func
self.requests = []
self.utils = MockUtils()
self.api_key = api_key
self.org = org
self.me = me
self.teams = teams
@property
def default_team(self):
return self.teams[0]['team']
@property
def default_project(self):
return self.teams[0]['projects'][0]['project']
@property
def default_release(self):
return self.teams[0]['projects'][0]['release']
@property
def default_event(self):
return self.teams[0]['projects'][0]['events'][0]
@contextmanager
def isolated_project(self, project_name):
from sentry.models import Group, Event
project = self.utils.create_project(project_name, teams=[self.default_team], org=self.org)
release = self.utils.create_release(project=project, user=self.me)
self.utils.create_event(project=project, release=release, platform='python')
self.utils.create_event(project=project, release=release, platform='java')
try:
yield project
finally:
# Enforce safe cascades into Group/Event
Group.objects.filter(
project=project,
).delete()
Event.objects.filter(
project_id=project.id,
).delete()
project.delete()
@contextmanager
def isolated_org(self, org_name):
from sentry.models import Group, Event
org = self.utils.create_org(org_name, owner=self.me)
try:
yield org
finally:
# Enforce safe cascades into Group/Event
Group.objects.filter(
project__organization=org,
).delete()
Event.objects.filter(
project_id__in=org.project_set.values('id'),
).delete()
org.delete()
def request(self, method, path, headers=None, data=None, api_key=None, format='json'):
if api_key is None:
api_key = self.api_key
path = '/api/0/' + path.lstrip('/')
headers = dict(headers or {})
request_is_json = True
body = None
files = None
was_multipart = False
if data is not None:
if format == 'json':
body = json.dumps(data, sort_keys=True)
headers['Content-Type'] = 'application/json'
elif format == 'multipart':
files = {}
for key, value in data.items():
if hasattr(value, 'read') or isinstance(value, tuple):
files[key] = value
del data[key]
was_multipart = True
body = data
req_headers = dict(headers)
req_headers['Host'] = 'sentry.io'
req_headers['Authorization'] = \
'Basic %s' % base64.b64encode('%s:' % (api_key.key.encode('utf-8')))
url = 'http://127.0.0.1:%s%s' % (settings.SENTRY_APIDOCS_WEB_PORT, path, )
response = requests.request(
method=method, url=url, files=files, headers=req_headers, data=body
)
response_headers = dict(response.headers)
# Don't want those
response_headers.pop('server', None)
response_headers.pop('date', None)
if response.headers.get('Content-Type') == 'application/json':
response_data = response.json()
is_json = True
else:
response_data = response.text
is_json = False
if was_multipart:
headers['Content-Type'] = response.request.headers['content-type']
data = response.request.body
request_is_json = False
rv = {
'request': {
'method': method,
'path': path,
'headers': headers,
'data': data,
'is_json': request_is_json,
},
'response': {
'headers': response_headers,
'status': response.status_code,
'reason': response.reason,
'data': response_data,
'is_json': is_json,
}
}
self.requests.append(rv)
return rv
def to_json(self):
doc = extract_documentation(self.func)
title, text = extract_title_and_text(doc)
return {
'ident': self.ident,
'requests': self.requests,
'title': title,
'text': text,
}
|
the-stack_0_9676 | import setuptools
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="better-ffmpeg-progress",
version="2.0.0",
author="GitHub.com/CrypticSignal",
author_email="[email protected]",
description="Run FFmpeg & see percentage progress + ETA.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/CrypticSignal/better-ffmpeg-progress",
packages=["better_ffmpeg_progress"],
install_requires=["ffmpeg-python", "tqdm"],
python_requires=">=3.6",
keywords=["ffmpeg", "progress"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
) |
the-stack_0_9677 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
from six.moves import reduce
from .. import core
from ..layers import utils
from ..layers import nn as F
from .. import dygraph_utils
from . import layers
from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant, NumpyArrayInitializer
from .. import unique_name
from .layer_object_helper import LayerObjectHelper
from ..data_feeder import check_variable_and_dtype, check_type
import numpy as np
import numbers
import logging
import paddle.utils.deprecated as deprecated
__all__ = [
'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
'SpectralNorm', 'TreeConv', 'Flatten'
]
class Conv2D(layers.Layer):
"""
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW format, where N is batch size, C is the number of
the feature map, H is the height of the feature map, and W is the width of the feature map.
Filter's shape is [MCHW] , where M is the number of output feature map,
C is the number of input feature map, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input feature map divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \\sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filter. It is as same as the output
feature map.
filter_size (int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
padding (int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups (int, optional): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filter of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Raises:
ValueError: if ``use_cudnn`` is not a bool value.
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D
import numpy as np
data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
with fluid.dygraph.guard():
conv2d = Conv2D(3, 2, 3)
data = to_variable(data)
conv = conv2d(data)
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv2D, self).__init__()
self._num_channels = num_channels
self._groups = groups
self._stride = utils.convert_to_list(stride, 2, 'stride')
self._padding = utils.convert_to_list(padding, 2, 'padding')
self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if (self._num_channels == self._groups and
num_filters % self._num_channels == 0 and
not self._use_cudnn and not self._use_mkldnn):
self._l_type = 'depthwise_conv2d'
else:
self._l_type = 'conv2d'
self._num_channels = num_channels
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[
1] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
if in_dygraph_mode() and self._l_type == 'conv2d':
attrs = ('strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups
if self._groups else 1, 'use_cudnn', self._use_cudnn,
'use_mkldnn', self._use_mkldnn)
out = core.ops.conv2d(input, self.weight, *attrs)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
inputs = {
'Input': [input],
'Filter': [self.weight],
}
attrs = {
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': self._use_mkldnn,
}
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'Conv2D')
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={
'Input': input,
'Filter': self.weight,
},
outputs={"Output": pre_bias},
attrs=attrs)
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1,
'use_mkldnn': self._use_mkldnn})
else:
pre_act = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_act, act=self._act)
class Conv3D(layers.Layer):
"""
**Convlution3D Layer**
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are multidimensional tensors with a shape of
:math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convlution3D is similar with Convlution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filter. It is as same as the output image channel.
filter_size (int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square, filter_size_depth = filter_size_height
= filter_size_width = filter_size.
stride (int|tuple, optional): The stride size. If stride is a tuple, it must
contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
stride_D = stride_H = stride_W = stride. The default value is 1.
padding (int|tuple, optional): The padding size. If padding is a tuple, it must
contain three integers, (padding_D, padding_H, padding_W). Otherwise, the
padding_D = padding_H = padding_W = padding. The default value is 0.
dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups (int, optional): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3d = fluid.dygraph.nn.Conv3D(
num_channels=3, num_filters=2, filter_size=3, act="relu")
ret = conv3d(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv3D, self).__init__()
self._num_channels = num_channels
self._groups = groups
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._act = act
self._use_cudnn = use_cudnn
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
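        # Build the conv3d op through the layer helper; bias addition and the
        # optional activation are appended as separate ops afterwards.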
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='conv3d',
inputs={
'Input': input,
'Filter': self.weight,
},
outputs={"Output": pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': False
})
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class Conv3DTranspose(layers.Layer):
"""
**Convlution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
    is the width of the feature. Parameters(dilations, strides, paddings) are
    three elements. These three elements represent depth, height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]
**Note**:
          The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
          when stride > 1, conv3d maps multiple input shapes to the same output shape,
          so for conv3d_transpose, when stride > 1, one input shape maps to multiple output shapes.
          If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out},
          W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
          size must be between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
          the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
          and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
          be between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of the filter. It is as same as the output
image channel.
filter_size(int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
padding(int|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
The default value is 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
The default value is 1.
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose(
num_channels=3,
num_filters=12,
filter_size=12,
use_cudnn=False)
ret = conv3dTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
super(Conv3DTranspose, self).__init__()
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._param_attr = param_attr
self._num_channels = num_channels
self._filter_size = filter_size
self._groups = 1 if groups is None else groups
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._bias_attr = bias_attr
self._act = act
self._dtype = dtype
self._filter_size = utils.convert_to_list(
self._filter_size, 3, 'conv3d_transpose.filter_size')
filter_shape = [self._num_channels, self._num_filters // self._groups
] + self._filter_size
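        # Transposed-convolution filters are laid out as
        # [in_channels, out_channels // groups, D_f, H_f, W_f].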
self.weight = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="conv3d_transpose",
inputs={'Input': [input],
'Filter': [self.weight]},
outputs={'Output': pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn
})
if self._bias_attr:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(pre_act, act=self._act)
class Pool2D(layers.Layer):
"""
:alias_main: paddle.nn.Pool2D
:alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
:old_api: paddle.fluid.dygraph.Pool2D
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
The pooling2d operation calculates the output based on the input, pool_type and pool_size, pool_stride,
    pool_padding parameters. Input and output are in NCHW format, where N is batch size, C is the number of feature maps,
    H is the height of the feature map, and W is the width of the feature map.
    Parameters (ksize, strides, paddings) each contain two elements. These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.
Example:
- Input:
Input shape: :math:`(N, C, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C, H_{out}, W_{out})`
If ``ceil_mode`` = False:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
If ``ceil_mode`` = True:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
If ``exclusive`` = False:
.. math::
hstart &= i * strides[0] - paddings[0] \\\\
hend &= hstart + ksize[0] \\\\
wstart &= j * strides[1] - paddings[1] \\\\
wend &= wstart + ksize[1] \\\\
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
If ``exclusive`` = True:
.. math::
hstart &= max(0, i * strides[0] - paddings[0])\\\\
hend &= min(H, hstart + ksize[0]) \\\\
wstart &= max(0, j * strides[1] - paddings[1]) \\\\
wend & = min(W, wstart + ksize[1]) \\\\
Output(i ,j) & = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Parameters:
pool_size (int or list or tuple, optional): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be a square of an int. Default: -1.
pool_type(str, optional) : The pooling type, can be "max" for max-pooling and "avg" for average-pooling.
Default: max.
pool_stride (int or list or tuple, optional): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,
the pool stride size will be a square of an int. Default: 1.
pool_padding (int or list or tuple, optional): The padding size for pooling operation.
If ``pool_padding`` is a tuple,
it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).
Otherwise, the padding size for pooling operation will be a square of an int. Default: 0.
global_pooling (bool, optional): Whether to use the global pooling. If global_pooling = true,
kernel size and paddings will be ignored. Default: False.
        use_cudnn (bool, optional): Only used in the cudnn kernel; cudnn needs to be installed. Default: True.
ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default: False.
exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
``[batch_size, input_channels, input_height, input_width]``. When it is `"NHWC"`, the data is
stored in the order of: ``[batch_size, input_height, input_width, input_channels]``
Returns:
None
Raises:
ValueError: If ``pool_type`` is not "max" nor "avg".
ValueError: If ``global_pooling`` is False and ``pool_size`` is -1.
ValueError: If ``use_cudnn`` is not a bool value.
ValueError: If ``data_format`` is not "NCHW" nor "NHWC".
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
with fluid.dygraph.guard():
             data = np.random.random((3, 32, 32, 5)).astype('float32')
pool2d = fluid.dygraph.Pool2D(pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
pool2d_res = pool2d(to_variable(data))
"""
def __init__(self,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
exclusive=True,
data_format="NCHW"):
        data_format = data_format.upper()  # support NHWC, nhwc, etc.
        pool_type = pool_type.lower()  # support max, Max, etc.
if pool_type not in ["max", "avg"]:
            raise ValueError(
                "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
                str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When the global_pooling is False, pool_size must be passed "
"and be a valid value. Received pool_size: " + str(pool_size))
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
super(Pool2D, self).__init__()
self._pool_type = pool_type
self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
self._pool_padding = utils.convert_to_list(pool_padding, 2,
'pool_padding')
self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
self._global_pooling = global_pooling
self._use_cudnn = use_cudnn
self._ceil_mode = ceil_mode
self._exclusive = exclusive
self._data_format = data_format
self._l_type = 'pool2d'
def forward(self, input):
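        # Dygraph fast path: dispatch straight to the C++ pool2d kernel with a
        # flattened attribute tuple; the static-graph branch below appends a
        # pool2d op through the layer helper instead.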
if in_dygraph_mode():
attrs = ('pooling_type', self._pool_type, 'ksize', self._pool_size,
'global_pooling', self._global_pooling, 'strides',
self._pool_stride, 'paddings', self._pool_padding,
'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
'use_mkldnn', self._use_mkldnn, 'exclusive',
self._exclusive, 'data_format', self._data_format)
return core.ops.pool2d(input, *attrs)
check_variable_and_dtype(
input, 'input', ['int8', 'uint8', 'float16', 'float32', 'float64'],
'Pool2D')
attrs = {
"pooling_type": self._pool_type,
"ksize": self._pool_size,
"global_pooling": self._global_pooling,
"strides": self._pool_stride,
"paddings": self._pool_padding,
"use_cudnn": self._use_cudnn,
"ceil_mode": self._ceil_mode,
"use_mkldnn": self._use_mkldnn,
"exclusive": self._exclusive,
"data_format": self._data_format,
}
inputs = {"X": [input]}
pool_out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs=attrs)
return pool_out
class Linear(layers.Layer):
"""
Fully-connected linear transformation layer:
.. math::
Out = Act({XW + b})
where :math:`X` is the input Tensor, :math:`W` and :math:`b` are weight and bias respectively.
Linear layer takes only one ``Tensor`` input.
The Linear layer multiplies input tensor with weight matrix and
produces an output Tensor of shape [N, *, `output_dim`],
where N is batch size and `*` means any number of additional dimensions.
If ``bias_attr`` is not None, a bias variable will be created and added to the output.
Finally, if ``act`` is not None, it will be applied to the output as well.
Parameters:
input_dim(int): The number of input units in this layer.
output_dim(int): The number of output units in this layer.
param_attr(ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable
weights(Parameter) of this layer. Default: None.
bias_attr(ParamAttr or list of ParamAttr, optional): The attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act(str, optional): Activation to be applied to the output of this layer. Default: None.
dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32".
Attributes:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
res = linear(data) # [30, 10, 64]
"""
def __init__(self,
input_dim,
output_dim,
param_attr=None,
bias_attr=None,
act=None,
dtype="float32"):
super(Linear, self).__init__()
self._act = act
self._dtype = dtype
self.weight = self.create_parameter(
shape=[input_dim, output_dim],
attr=param_attr,
dtype=dtype,
is_bias=False)
self.bias = self.create_parameter(
shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
def forward(self, input):
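        # Computes matmul(input, weight), broadcasts the bias along the last
        # dimension of the input, then applies the optional activation.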
if in_dygraph_mode():
pre_bias = _varbase_creator(dtype=input.dtype)
core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
'transpose_Y', False, "alpha", 1, "use_mkldnn",
self._use_mkldnn)
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias,
self.bias,
axis=len(input.shape) - 1,
use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], "Linear")
attrs = {
"transpose_X": False,
"transpose_Y": False,
"alpha": 1,
"use_mkldnn": self._use_mkldnn,
}
inputs = {"X": [input], "Y": [self.weight]}
tmp = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="matmul", inputs=inputs, outputs={"Out": tmp}, attrs=attrs)
if self.bias is not None:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [tmp],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={
'axis': len(input.shape) - 1,
'use_mkldnn': self._use_mkldnn
})
else:
pre_activation = tmp
return self._helper.append_activation(pre_activation, act=self._act)
class InstanceNorm(layers.Layer):
"""
This interface is used to construct a callable object of the ``InstanceNorm`` class.
For more details, refer to code examples.
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
\\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Parameters:
num_channels(int): Indicate the number of channels of the input ``Tensor``.
epsilon(float, optional): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the param_attr is not set, the parameter is initialized
            to one. If it is set to False, param_attr will not be created. Default: None.
bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
            If it is set to False, bias_attr will not be created. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
import paddle
# x's shape is [1, 3, 1, 2]
x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
instanceNorm = paddle.nn.InstanceNorm(3)
ret = instanceNorm(x)
# ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995]
print(ret)
"""
def __init__(self,
num_channels,
epsilon=1e-5,
param_attr=None,
bias_attr=None,
dtype='float32'):
super(InstanceNorm, self).__init__()
if param_attr == False or bias_attr == False:
            assert bias_attr == param_attr, "param_attr and bias_attr must be set to False at the same time in InstanceNorm"
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if param_attr != False and bias_attr != False:
self.scale = self.create_parameter(
attr=self._param_attr,
shape=[num_channels],
dtype=self._dtype,
default_initializer=Constant(1.0),
is_bias=False)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[num_channels],
dtype=self._dtype,
default_initializer=Constant(0.0),
is_bias=True)
else:
self.scale = None
self.bias = None
def forward(self, input):
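        # Dygraph mode calls the instance_norm kernel directly; the
        # static-graph branch also wires up SavedMean/SavedVariance outputs.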
if in_dygraph_mode():
out, _, _ = core.ops.instance_norm(input, self.scale, self.bias,
'epsilon', self._epsilon)
return out
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
"InstanceNorm")
attrs = {"epsilon": self._epsilon}
if self.scale and self.bias:
inputs = {"X": [input], "Scale": [self.scale], "Bias": [self.bias]}
else:
inputs = {"X": [input]}
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
instance_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
outputs = {
"Y": [instance_norm_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
self._helper.append_op(
type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return instance_norm_out
class BatchNorm(layers.Layer):
"""
:alias_main: paddle.nn.BatchNorm
:alias: paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm
:old_api: paddle.fluid.dygraph.BatchNorm
This interface is used to construct a callable object of the ``BatchNorm`` class.
For more details, refer to code examples.
It implements the function of the Batch Normalization Layer and can be used
as a normalizer function for conv2d and fully connected operations.
The data is normalized by the mean and variance of the channel based on the current batch data.
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
When use_global_stats = False, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are the statistics of one mini-batch.
Calculated as follows:
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
- :math:`x` : mini-batch data
- :math:`m` : the size of the mini-batch data
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually obtained
    from a pre-trained model. Calculated as follows:
.. math::
moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\
moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\
The normalization function formula is as follows:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
    - :math:`\\epsilon` : add a small value to the variance to prevent division by zero
- :math:`\\gamma` : trainable proportional parameter
- :math:`\\beta` : trainable deviation parameter
Parameters:
num_channels(int): Indicate the number of channels of the input ``Tensor``.
act(str, optional): Activation to be applied to the output of batch normalization. Default: None.
        is_test (bool, optional): A flag indicating whether it is in test phase or not.
This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``.
Default: False.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
data_layout(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.
        moving_mean_name(str, optional): The name of moving_mean which stores the global Mean. Default: None.
        moving_variance_name(str, optional): The name of the moving_variance which stores the global Variance. Default: None.
do_model_average_for_mean_and_var(bool, optional): Whether parameter mean and variance should do model
average when model average is enabled. Default: True.
use_global_stats(bool, optional): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period. Default: False.
trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when
setting trainable_statistics True, mean and variance will be calculated by current batch statistics.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
batch_norm = fluid.BatchNorm(10)
hidden1 = batch_norm(x)
"""
def __init__(self,
num_channels,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
dtype='float32',
data_layout='NCHW',
in_place=False,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
trainable_statistics=False):
super(BatchNorm, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
if dtype == "float16":
self._dtype = "float32"
else:
self._dtype = dtype
param_shape = [num_channels]
# create parameter
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
self.weight.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
self.bias.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.
self._mean = self.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._mean.stop_gradient = True
self._variance = self.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._variance.stop_gradient = True
self._in_place = in_place
self._data_layout = data_layout
self._momentum = momentum
self._epsilon = epsilon
self._is_test = is_test
self._fuse_with_relu = False
self._use_global_stats = use_global_stats
self._trainable_statistics = trainable_statistics
def forward(self, input):
# create output
# mean and mean_out share the same memory
mean_out = self._mean
# variance and variance out share the same memory
variance_out = self._variance
if in_dygraph_mode():
attrs = ("momentum", self._momentum, "epsilon", self._epsilon,
"is_test", not self.training, "data_layout",
self._data_layout, "use_mkldnn", self._use_mkldnn,
"fuse_with_relu", self._fuse_with_relu, "use_global_stats",
self._use_global_stats, 'trainable_statistics',
self._trainable_statistics)
batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
input, self.weight, self.bias, self._mean, self._variance,
mean_out, variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'BatchNorm')
attrs = {
"momentum": self._momentum,
"epsilon": self._epsilon,
"is_test": self._is_test,
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": self._fuse_with_relu,
"use_global_stats": self._use_global_stats,
"trainable_statistics": self._trainable_statistics,
}
inputs = {
"X": [input],
"Scale": [self.weight],
"Bias": [self.bias],
"Mean": [self._mean],
"Variance": [self._variance]
}
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(
self._dtype)
outputs = {
"Y": [batch_norm_out],
"MeanOut": [mean_out],
"VarianceOut": [variance_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
self._helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)
class Dropout(layers.Layer):
"""
This interface is used to construct a callable object of the ``Dropout`` class.
For more details, refer to code examples.
Drop or keep each element of input independently. Dropout is a regularization
technique for reducing overfitting by preventing neuron co-adaption during
training. The dropout operator randomly sets (according to the given dropout
    probability) the outputs of some units to zero, while others remain
    unchanged.
    The Dropout layer can be removed for efficiency concerns.
Parameters:
p (float, optional): Probability of setting units to zero. Default: 0.5
seed (int, optional): A Python integer used to create random seeds. If this
parameter is set to None, a random seed is used.
            NOTE: If an integer seed is given, the same output
            units will always be dropped. DO NOT use a fixed seed in training. Default: None.
dropout_implementation(string, optional): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
- train: out = input * mask
- inference: out = input * (1.0 - p)
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
- train: out = input * mask / ( 1.0 - p )
- inference: out = input
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is p)
        is_test (bool, optional): A flag indicating whether it is in test phase or not.
This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
m = fluid.dygraph.Dropout(p=0.5)
                dropped_train = m(x)
# switch to eval mode
m.eval()
                dropped_eval = m(x)
"""
def __init__(self,
p=0.5,
seed=None,
dropout_implementation="downgrade_in_infer",
is_test=False):
super(Dropout, self).__init__()
assert isinstance(p, (float, int)), "p argument should be a number"
        assert 0 <= p <= 1, "p argument should be between 0 and 1"
self._dropout_prob = p
assert seed is None or isinstance(
seed, int), "seed argument should be None or a integer"
self._seed = seed
assert dropout_implementation in (
'downgrade_in_infer', 'upscale_in_train'
), "dropout_implementation argument should be 'downgrade_in_infer' or 'upscale_in_train'"
self._dropout_implementation = dropout_implementation
self._is_test = is_test
def forward(self, input):
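        # Fall back to the program-level random seed when no explicit seed was
        # given, so dropout masks stay reproducible under a fixed global seed.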
prog = default_main_program()
if (self._seed is None or self._seed == 0) and prog.random_seed != 0:
self._seed = prog.random_seed
attrs = {
'dropout_prob': self._dropout_prob,
'is_test': not self.training
if in_dygraph_mode() else self._is_test,
'fix_seed': self._seed is not None,
'seed': self._seed if self._seed is not None else 0,
'dropout_implementation': self._dropout_implementation,
}
if in_dygraph_mode():
attrs = sum(attrs.items(), ())
out, mask = core.ops.dropout(input, *attrs)
return out
out = self._helper.create_variable_for_type_inference(dtype=input.dtype)
mask = self._helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
self._helper.append_op(
type='dropout',
inputs={'X': [input]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
class Embedding(layers.Layer):
"""
:alias_main: paddle.nn.Embedding
:alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding
:old_api: paddle.fluid.dygraph.Embedding
**Embedding Layer**
This interface is used to construct a callable object of the ``Embedding`` class.
For specific usage, refer to code examples. It implements the function of the Embedding Layer.
This layer is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.
    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]`,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
                input.data = [[1, 3], [2, 4], [4, 127]]
input.shape = [3, 2]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
            Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
            It will pad all-zero data when the id is 127.
Parameters:
size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size
of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
            True because sparse update is faster. But some optimizers do not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(np.dtype|core.VarDesc.VarType|str): It refers to the data type of output Tensor.
It must be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy as np
# example 1
inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64')
inp_word.shape # [2, 3]
dict_size = 20
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
static_rlt3.shape # [2, 3, 32]
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
size=[128, 100],
param_attr= w_param_attrs,
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
"""
def __init__(self,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
super(Embedding, self).__init__()
self._size = size
self._is_sparse = is_sparse
self._is_distributed = is_distributed
self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
self._param_attr = param_attr
self._dtype = dtype
self._remote_prefetch = self._is_sparse and (not self._is_distributed)
if self._remote_prefetch:
assert self._is_sparse is True and self._is_distributed is False
self.weight = self.create_parameter(
attr=self._param_attr,
shape=self._size,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
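        # Look up rows of the [vocab_size, emb_size] weight matrix by id via
        # the lookup_table_v2 op; rows hit by padding_idx come back as zeros.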
if in_dygraph_mode():
return core.ops.lookup_table_v2(
self.weight, input, 'is_sparse', self._is_sparse,
'is_distributed', self._is_distributed, 'remote_prefetch',
self._remote_prefetch, 'padding_idx', self._padding_idx)
check_variable_and_dtype(input, 'input', ['int64'], 'Embedding')
attrs = {
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='lookup_table_v2',
inputs={'Ids': input,
'W': self.weight},
outputs={'Out': out},
attrs=attrs)
return out
class LayerNorm(layers.Layer):
"""
:alias_main: paddle.nn.LayerNorm
:alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm
:old_api: paddle.fluid.dygraph.LayerNorm
This interface is used to construct a callable object of the ``LayerNorm`` class.
For more details, refer to code examples.
It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
\\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Parameters:
normalized_shape(int or list or tuple): Input shape from an expected input of
size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
If it is a single integer, this module will normalize over the last dimension
which is expected to be of that specific size.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy
x = numpy.random.random((3, 32, 32)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
layerNorm = fluid.LayerNorm([32, 32])
ret = layerNorm(x)
"""
def __init__(self,
normalized_shape,
scale=True,
shift=True,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
dtype='float32'):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = [normalized_shape]
self._normalized_shape = list(normalized_shape)
self._scale = scale
self._shift = shift
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._dtype = dtype
param_shape = [np.prod(self._normalized_shape)]
if self._scale:
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
else:
if self._param_attr:
logging.warn("param_attr are only available with scale is True")
self.weight = None
if self._shift:
assert self._bias_attr is not False
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
else:
if self._bias_attr:
logging.warn("bias_attr are only available with shift is True")
self.bias = None
def forward(self, input):
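        # Normalization runs over the trailing dimensions that match
        # normalized_shape; everything before begin_norm_axis stays batched.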
input_shape = list(input.shape)
input_ndim = len(input_shape)
normalized_ndim = len(self._normalized_shape)
self._begin_norm_axis = input_ndim - normalized_ndim
if input_ndim < normalized_ndim or input_shape[
self._begin_norm_axis:] != self._normalized_shape:
str_normalized_shape = str(self._normalized_shape)
raise ValueError(
'Given normalized_shape is ' + str_normalized_shape +
', expected input with shape [*, ' + str_normalized_shape[
1:] + ', but got input shape ' + str(input_shape))
if in_dygraph_mode():
pre_act, _, _ = core.ops.layer_norm(
input, self.weight, self.bias, 'epsilon', self._epsilon,
'begin_norm_axis', self._begin_norm_axis)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'LayerNorm')
inputs = dict()
inputs['X'] = [input]
if self._scale:
inputs['Scale'] = [self.weight]
if self._shift:
inputs['Bias'] = [self.bias]
attrs = {
"epsilon": self._epsilon,
"begin_norm_axis": self._begin_norm_axis
}
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
layer_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={
"epsilon": self._epsilon,
"begin_norm_axis": self._begin_norm_axis
})
return self._helper.append_activation(layer_norm_out, act=self._act)
class GRUUnit(layers.Layer):
"""
**GRU unit layer**
It creates a callable object from GRUUnit class.
If origin_mode is True, then the equation of a gru step is from paper
`Learning Phrase Representations using RNN Encoder-Decoder for Statistical
Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
If origin_mode is False, then the equation of a gru step is from paper
`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot((1-u_t), h_{t-1}) + dot(u_t, m_t)
The inputs of gru unit includes :math:`z_t`, :math:`h_{t-1}`. In terms
of the equation above, the :math:`z_t` is split into 3 parts -
:math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to
implement a full GRU unit operator for an input, a fully
connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.
The terms :math:`u_t` and :math:`r_t` represent the update and reset gates
    of the GRU cell. Unlike LSTM, GRU has one fewer gate. However, there is
an intermediate candidate hidden output, which is denoted by :math:`m_t`.
This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`
and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.
Parameters:
size (int): The input dimension value.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
hidden-hidden weight matrix.
**Note**:
            1. The shape of the weight matrix is :math:`[D, 3*D]`, where D is the hidden size.
2. All elements in the weight matrix can be divided into two parts. The first
part are weights of the update gate and reset gate with shape :math:`[D, 2*D]`,
and the second part are weights for candidate hidden state with shape :math:`[D, D]`.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default
value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias
of GRU.Note that the bias with :math:`[1, 3*D]` concatenates
the bias in the update gate, reset gate and candidate calculations.
If it is set to False, no bias will be applied to the update gate,
reset gate and candidate calculations. If it is set to None or one
attribute of ParamAttr, gru_unit will create ParamAttr as
bias_attr. If the Initializer of the bias_attr is not set, the bias
is initialized zero. The default value is None.
activation (str): The activation type for cell (actNode).
The default value is 'tanh'.
gate_activation (str): The activation type for gates (actGate).
The default value is 'sigmoid'.
dtype(str): The dtype of the layers. The data type can be set as
'float32', 'float64'. The default value is 'float32'.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
tuple: The hidden value, reset-hidden value and gate values. The hidden value
is a 2-D tensor with shape :math:`[T, D]` . The reset-hidden value is a
2-D tensor with shape :math:`[T, D]` . The gate value is a 2-D tensor with
shape :math:`[T, 3*D]`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy
lod = [[2, 4, 3]]
D = 5
T = sum(lod[0])
input = numpy.random.rand(T, 3 * D).astype('float32')
hidden_input = numpy.random.rand(T, D).astype('float32')
with fluid.dygraph.guard():
gru = fluid.dygraph.GRUUnit(size=D * 3)
dy_ret = gru(
base.to_variable(input), base.to_variable(hidden_input))
"""
def __init__(self,
size,
param_attr=None,
bias_attr=None,
activation='tanh',
gate_activation='sigmoid',
origin_mode=False,
dtype='float32'):
super(GRUUnit, self).__init__()
self._bias_attr = bias_attr
activation_dict = dict(
identity=0,
sigmoid=1,
tanh=2,
relu=3, )
self.activation = activation_dict[activation]
self.gate_activation = activation_dict[gate_activation]
self._dtype = dtype
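        # The `size` argument is the gate-concatenated input width 3*D; the hidden
        # width D used for the weight shape [D, 3*D] is therefore size // 3.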
size = size // 3
# create weight
self.weight = self.create_parameter(
attr=param_attr, shape=[size, 3 * size], dtype=dtype)
# create bias
bias_size = [1, 3 * size]
self._bias_size = bias_size
self.bias = self.create_parameter(
attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
def forward(self, input, hidden):
if in_dygraph_mode():
gate, reset_hidden_pre, updated_hidden = core.ops.gru_unit(
input, hidden, self.weight, self.bias, 'activation',
self.activation, 'gate_activation', self.gate_activation)
return updated_hidden, reset_hidden_pre, gate
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'GRUUnit')
check_variable_and_dtype(hidden, 'hidden', ['float32', 'float64'],
'GRUUnit')
inputs = {
'Input': [input],
'HiddenPrev': [hidden],
'Weight': [self.weight]
}
if self.bias is not None:
inputs['Bias'] = [self.bias]
gate = self._helper.create_variable_for_type_inference(self._dtype)
reset_hidden_pre = self._helper.create_variable_for_type_inference(
self._dtype)
updated_hidden = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type='gru_unit',
inputs=inputs,
outputs={
'Gate': gate,
'ResetHiddenPrev': reset_hidden_pre,
'Hidden': updated_hidden,
},
attrs={
'activation': self.activation,
'gate_activation': self.gate_activation,
})
return updated_hidden, reset_hidden_pre, gate
class NCE(layers.Layer):
"""
This interface is used to construct a callable object of the ``NCE`` class.
For more details, refer to code examples.
It implements the function of the ``NCE`` loss function.
By default this function uses a uniform distribution for sampling, and it
    computes and returns the noise-contrastive estimation training loss. See
`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_ .
Parameters:
num_total_classes (int): Total number of classes in all samples.
dim (int): Dimension of input (possibly embedding dim).
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of nce. If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
num_neg_samples (int, optional): The number of negative classes. The default value is 10.
sampler (str, optional): The sampler used to sample class from negative classes.
It can be 'uniform', 'log_uniform' or 'custom_dist'.
default: 'uniform'.
custom_dist (float[], optional): A float[] with size=num_total_classes.
It is used when sampler is set to 'custom_dist'.
custom_dist[i] is the probability of i-th class to be sampled.
Default: None.
seed (int, optional): The seed used in sampler. Default: 0.
is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
window_size = 5
dict_size = 20
label_word = int(window_size // 2) + 1
inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
with fluid.dygraph.guard():
words = []
for i in range(window_size):
words.append(fluid.dygraph.base.to_variable(inp_word[i]))
emb = fluid.Embedding(
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
embs3 = []
for i in range(window_size):
if i == label_word:
continue
emb_rlt = emb(words[i])
embs3.append(emb_rlt)
embs3 = fluid.layers.concat(input=embs3, axis=1)
nce = fluid.NCE(
num_total_classes=dict_size,
dim=embs3.shape[1],
num_neg_samples=2,
sampler="custom_dist",
custom_dist=nid_freq_arr.tolist(),
seed=1,
param_attr='nce.w',
bias_attr='nce.b')
wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
nce_loss3 = nce(embs3, wl)
"""
def __init__(self,
num_total_classes,
dim,
sample_weight=None,
param_attr=None,
bias_attr=None,
num_neg_samples=None,
sampler="uniform",
custom_dist=None,
seed=0,
is_sparse=False,
dtype='float32'):
super(NCE, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._num_total_classes = num_total_classes
self._dtype = dtype
self._inputs = dict()
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
if sampler == "uniform":
sampler = 0
elif sampler == "log_uniform":
sampler = 1
elif sampler == "custom_dist":
assert custom_dist is not None
# assert isinstance(custom_dist, Variable)
custom_dist_len = len(custom_dist)
alias_probs_ = [0] * custom_dist_len
alias_ = [0] * custom_dist_len
bigs = []
littles = []
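            # Build the alias-method tables (Walker's alias method): classes whose
            # scaled probability exceeds 1 go to `bigs`, the rest to `littles`; pairing
            # them fills `alias_probs_`/`alias_` so each sample can be drawn in O(1).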
for i in range(custom_dist_len):
normal_prob = custom_dist[i] * custom_dist_len
if normal_prob - 1.0 > 0:
bigs.append((i, normal_prob))
elif 1.0 - normal_prob > 0:
littles.append((i, normal_prob))
else:
alias_probs_[i] = normal_prob
alias_[i] = -1
while len(bigs) and len(littles):
big = bigs.pop(0)
little = littles.pop(0)
big_idx = big[0]
big_prob = big[1]
alias_probs_[little[0]] = little[1]
alias_[little[0]] = big_idx
big_left = big[1] + little[1] - 1
if big_left - 1.0 > 0:
bigs.append((big_idx, big_left))
elif 1.0 - big_left > 0:
littles.append((big_idx, big_left))
else:
alias_probs_[big_idx] = big_left
alias_[big_idx] = -1
if len(bigs):
big = bigs.pop(0)
alias_probs_[big[0]] = 1.0
alias_[big[0]] = -1
if len(littles):
little = littles.pop(0)
alias_probs_[little[0]] = 1.0
alias_[little[0]] = -1
def _init_by_numpy_array(numpy_array):
ret = self.create_parameter(
attr=ParamAttr(),
shape=numpy_array.shape,
dtype=numpy_array.dtype,
default_initializer=NumpyArrayInitializer(numpy_array))
ret.stop_gradient = True
return ret
self._inputs['CustomDistProbs'] = _init_by_numpy_array(
np.array(custom_dist).astype('float32'))
self._inputs['CustomDistAlias'] = _init_by_numpy_array(
np.array(alias_).astype('int32'))
self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array(
np.array(alias_probs_).astype('float32'))
sampler = 2
else:
raise Exception("Unsupported sampler type.")
if num_neg_samples is None:
num_neg_samples = 10
else:
num_neg_samples = int(num_neg_samples)
self._num_neg_samples = num_neg_samples
remote_prefetch = is_sparse
print(
"With sparse mode, if your models has only small parameter prefetch may cause speed down"
)
self._attrs = {
'num_total_classes': int(num_total_classes),
'num_neg_samples': num_neg_samples,
'seed': seed,
'sampler': sampler,
'is_sparse': is_sparse,
'remote_prefetch': remote_prefetch
}
self.weight = self.create_parameter(
attr=self._param_attr,
shape=[self._num_total_classes, dim],
is_bias=False,
dtype=self._dtype)
if self._bias_attr:
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_total_classes, 1],
is_bias=True,
dtype=self._dtype)
self._inputs['Bias'] = self.bias
self._inputs['Weight'] = self.weight
def forward(self, input, label, sample_weight=None):
check_variable_and_dtype(input, "input", ['float32', 'float64'], "NCE")
check_variable_and_dtype(label, "label", ['int64'], "NCE")
check_type(sample_weight, 'sample_weight', (Variable, type(None)),
'NCE')
assert isinstance(input, Variable)
assert isinstance(label, Variable)
self._inputs['Input'] = input
self._inputs['Label'] = label
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
cost = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_logits = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_labels = self._helper.create_variable_for_type_inference(
dtype=label.dtype)
self._helper.append_op(
type='nce',
inputs=self._inputs,
outputs={
'Cost': cost,
'SampleLogits': sample_logits,
'SampleLabels': sample_labels
},
attrs=self._attrs)
return cost / (self._num_neg_samples + 1)
class PRelu(layers.Layer):
"""
This interface is used to construct a callable object of the ``PRelu`` class.
For more details, refer to code examples.
It implements three activation methods of the ``PRelu`` activation function.
Equation:
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
Parameters:
mode (str): The mode for weight sharing. It supports all, channel
and element. all: all elements share same weight
channel:elements in a channel share same weight
element:each element has a weight
channel (int, optional): The number of channels.
This argument is required when mode is "channel".
Default: None.
input_shape (list or tuple, optional): The shape of input.
This argument is required when mode is "element".
Default: None.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
weight (alpha). Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
inp_np = np.ones([5, 200, 100, 100]).astype('float32')
with fluid.dygraph.guard():
inp_np = to_variable(inp_np)
prelu0 = fluid.PRelu(
mode='all',
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt0 = prelu0(inp_np)
prelu1 = fluid.PRelu(
mode='channel',
channel=200,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt1 = prelu1(inp_np)
prelu2 = fluid.PRelu(
mode='element',
input_shape=inp_np.shape,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt2 = prelu2(inp_np)
"""
def __init__(self,
mode,
channel=None,
input_shape=None,
param_attr=None,
dtype='float32'):
# need specify name_scope since snake-cased 'PRelu' is 'p_relu'
super(PRelu, self).__init__(name_scope='prelu')
self._mode = mode
self._param_attr = param_attr
self._dtype = dtype
if mode == 'all':
self._alpha_shape = [1]
elif mode == 'channel':
assert isinstance(
channel,
int), "channel argument is required when mode is 'channel'."
#NOTE(zhiqiu): The _alpha_shape should be [1, channel] + [1] * len(input_shape[2:]), not [1, channel, 1, 1].
# However, the suffix 1 in the list is useless, since the tensor is viewed as one demension array during kernel calculation.
# And, input_shape is not required when mode is 'channel', so it is simplified.
#NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
self._alpha_shape = [1, channel, 1, 1]
elif mode == 'element':
assert isinstance(input_shape, (
list, tuple
)), "input_shape argument is required when mode is 'element'."
self._alpha_shape = [1] + list(input_shape)[1:]
else:
raise ValueError('mode should be one of all, channel, element.')
self.weight = self.create_parameter(
attr=self._param_attr,
shape=self._alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(1.0))
def forward(self, input):
check_variable_and_dtype(input, 'input', ['float32'], 'PRelu')
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="prelu",
inputs={"X": input,
'Alpha': self.weight},
attrs={"mode": self._mode},
outputs={"Out": out})
return out
class BilinearTensorProduct(layers.Layer):
"""
:alias_main: paddle.nn.BilinearTensorProduct
:alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
:old_api: paddle.fluid.dygraph.BilinearTensorProduct
**Add Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
        out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,output_dim-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N]
    - :math:`out_{i}`: the i-th element of out, shape is [batch_size, output_dim].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Parameters:
input1_dim (int): The dimension of each first input.
input2_dim (int): The dimension of each second input.
output_dim (int): The dimension of output of this layer.
name (str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
act (str, optional): Activation to be applied to the output of this layer. The default value is None.
param_attr (ParamAttr, optional): The parameter attribute for the learnable w, parameters/weights of
this layer. The default value is None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. The default value is None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
        Variable: A 2-D Tensor of shape [batch_size, output_dim].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(
input1_dim=5, input2_dim=4, output_dim=1000)
ret = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1),
fluid.dygraph.base.to_variable(layer2))
"""
def __init__(self,
input1_dim,
input2_dim,
output_dim,
name=None,
act=None,
param_attr=None,
bias_attr=None,
dtype='float32'):
super(BilinearTensorProduct, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._name = name
self._input1_dim = input1_dim
self._input2_dim = input2_dim
self._output_dim = output_dim
self._inputs = dict()
self._dtype = dtype
param_shape = [self._output_dim, self._input1_dim, self._input2_dim]
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=False)
bias_size = [1, self._output_dim]
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=bias_size,
dtype=self._dtype,
is_bias=True)
@deprecated(
since="2.0.0",
update_to="paddle.nn.Bilinear",
reason="New name and new args in Bilinear, easier to use.")
def forward(self, x, y):
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'BilinearTensorProduct')
check_variable_and_dtype(y, 'y', ['float32', 'float64'],
'BilinearTensorProduct')
self._inputs = {"X": x, "Y": y, "Weight": self.weight}
if self.bias is not None:
self._inputs["Bias"] = self.bias
if self._name is not None:
out = self._helper.create_variable(
name=".".join([self.full_name(), self._name]),
dtype=self._dtype,
persistable=False)
else:
out = self._helper.create_variable(
dtype=self._dtype, persistable=False)
self._helper.append_op(
type="bilinear_tensor_product",
inputs=self._inputs,
outputs={"Out": out})
# add activation
return self._helper.append_activation(out, act=self._act)
class Conv2DTranspose(layers.Layer):
"""
This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input and output
are in NCHW format. Where N is batch size, C is the number of feature map,
H is the height of the feature map, and W is the width of the feature map.
Filter's shape is [MCHW] , where M is the number of input feature map,
C is the number of output feature map, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input feature map divided by the groups.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
The details of convolution transpose layer, please refer to the following explanation and references
`conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of the filter. It is as same as the output
feature map.
filter_size(int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
output_size(int or tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_H, image_W). None if use
filter_size, padding, and stride to calculate output_size.
if output_size and filter_size are specified at the same time, They
should follow the formula above. Default: None.
padding(int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
stride(int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
data = np.random.random((3, 32, 32, 5)).astype('float32')
conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(
num_channels=32, num_filters=2, filter_size=3)
ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
output_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
super(Conv2DTranspose, self).__init__()
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._groups = groups
self._num_channels = num_channels
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._padding = padding
self._stride = stride
self._dilation = dilation
self._filter_size = filter_size
self._output_size = output_size
self._dtype = dtype
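        # Use the depthwise transpose kernel only when groups == num_channels == num_filters
        # and cuDNN is disabled; otherwise fall back to the generic conv2d_transpose op.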
if (self._num_channels == self._groups and
self._num_filters == self._num_channels and
not self._use_cudnn):
self._op_type = 'depthwise_conv2d_transpose'
else:
self._op_type = 'conv2d_transpose'
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._stride = utils.convert_to_list(self._stride, 2, 'stride')
self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation')
self._filter_size = utils.convert_to_list(
self._filter_size, 2, 'conv2d_transpose.filter_size')
if self._output_size is None:
self._output_size = []
elif isinstance(self._output_size, list) or isinstance(
self._output_size, int):
self._output_size = utils.convert_to_list(self._output_size, 2,
'output_size')
else:
raise ValueError("output_size should be list or int")
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._groups = 1 if self._groups is None else self._groups
filter_shape = [self._num_channels, self._num_filters // self._groups
] + self._filter_size
self.weight = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
if in_dygraph_mode():
op = getattr(core.ops, self._op_type)
out = op(input, self.weight, 'output_size', self._output_size,
'strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups,
'use_cudnn', self._use_cudnn)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias,
1)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'],
"Conv2DTranspose")
inputs = {'Input': [input], 'Filter': [self.weight]}
attrs = {
'output_size': self._output_size,
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups,
'use_cudnn': self._use_cudnn
}
pre_bias = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
self._helper.append_op(
type=self._op_type,
inputs=inputs,
outputs={'Output': pre_bias},
attrs=attrs)
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
out = self._helper.append_activation(pre_act, act=self._act)
return out
class SequenceConv(layers.Layer):
"""
This function creates the op for sequence_conv, using the inputs and
other convolutional configurations for the filters and stride as given
in the input parameters to the function.
Parameters:
name_scope(str): The name of this class.
num_filters (int): number of filters.
filter_size (int): the filter size (H and W). Default: 3.
filter_stride (int): stride of the filter. Default: 1.
padding (bool|None): if True, add paddings. Default: None
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: output of sequence_conv
"""
def __init__(self,
name_scope,
num_filters,
filter_size=3,
filter_stride=1,
padding=None,
bias_attr=None,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "SequenceConv is not supported by dynamic graph mode yet!"
super(SequenceConv, self).__init__(name_scope)
self._num_filters = num_filters
self._filter_size = filter_size
self._filter_stride = filter_stride
self._padding = padding
self._bias_attr = bias_attr
self._param_attr = param_attr
self._act = act
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._filter_size * input.shape[1], self._num_filters]
self.weight = self.create_parameter(
attr=self._param_attr, shape=filter_shape, dtype=self._dtype)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='sequence_conv',
inputs={
'X': [input],
'Filter': [self.weight],
},
outputs={"Out": pre_bias},
attrs={
'contextStride': self._filter_stride,
'contextStart': -int(self._filter_size // 2),
'contextLength': self._filter_size
})
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class RowConv(layers.Layer):
"""
***Row-convolution operator***
The row convolution is called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2:
http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf
The main motivation is that a bidirectional RNN, useful in DeepSpeech like speech models, learns representation for a sequence by performing a
forward and a backward pass through the entire sequence. However, unlike
unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
and low-latency setting. The lookahead convolution incorporates information
from future subsequences in a computationally efficient manner to improve
unidirectional recurrent neural networks. The row convolution operator is
different from the 1D sequence convolution, and is computed as follows:
Given an input sequence X of length t and input dimension D, and a filter (W) of size context * D.
More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
Parameters:
name_scope(str): The name of this class.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc. Default: None.
act (str): Non-linear activation to be applied to output variable. Default: None.
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
the output(Out) is a LodTensor, which supports variable time-length input sequences.
The underlying tensor in this LodTensor is a matrix with shape T x N, i.e., the same shape as X.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
x = numpy.random.random((16)).astype('float32')
rowConv = fluid.dygraph.nn.RowConv(
'RowConv', future_context_size=2)
ret = rowConv(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
name_scope,
future_context_size,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "RowConv is not supported by dynamic graph mode yet!"
super(RowConv, self).__init__(name_scope)
self._act = act
self._param_attr = param_attr
self._future_context_size = future_context_size
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._future_context_size + 1, input.shape[1]]
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [self.weight]},
outputs={'Out': [out]})
return self._helper.append_activation(out, act=self._act)
class GroupNorm(layers.Layer):
"""
:alias_main: paddle.nn.GroupNorm
:alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm
:old_api: paddle.fluid.dygraph.GroupNorm
This interface is used to construct a callable object of the ``GroupNorm`` class.
For more details, refer to code examples.
It implements the function of the Group Normalization Layer.
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
channels(int): The number of channels of input.
groups(int): The number of groups that divided from channels.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized to one. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act(str, optional): Activation to be applied to the output of group normalization. Default: None.
data_layout(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = np.random.random((8, 32, 32)).astype('float32')
groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
ret = groupNorm(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
channels,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
dtype='float32'):
super(GroupNorm, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._epsilon = epsilon
self._channels = channels
self._groups = groups
self._act = act
self._dtype = dtype
if data_layout != 'NCHW':
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [self._channels]
self.weight = self.create_parameter(
attr=self._param_attr or False,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
self.bias = self.create_parameter(
attr=self._bias_attr or False,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
def forward(self, input):
inputs = {'X': input}
if self.bias is not None:
inputs['Bias'] = self.bias
if self.weight is not None:
inputs['Scale'] = self.weight
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
group_norm_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": self._epsilon,
"groups": self._groups})
return self._helper.append_activation(group_norm_out, self._act)
class SpectralNorm(layers.Layer):
"""
:alias_main: paddle.nn.SpectralNorm
:alias: paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm
:old_api: paddle.fluid.dygraph.SpectralNorm
This interface is used to construct a callable object of the ``SpectralNorm`` class.
For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.
This layer calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. Calculations are showed as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
While H is the :attr:`dim` th dimension of the input weights,
and W is the product result of remaining dimensions.
Step 2:
:attr:`power_iters` should be a positive integer, do following
calculations with U and V for :attr:`power_iters` rounds.
.. math::
\mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
        \mathbf{u} := \\frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Parameters:
weight_shape(list or tuple): The shape of weight parameter.
dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.
eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
weight = np.random.random((2, 8, 32, 32)).astype('float32')
spectralNorm = fluid.dygraph.nn.SpectralNorm(weight.shape, dim=1, power_iters=2)
ret = spectralNorm(fluid.dygraph.base.to_variable(weight))
"""
def __init__(self,
weight_shape,
dim=0,
power_iters=1,
eps=1e-12,
dtype='float32'):
super(SpectralNorm, self).__init__()
self._power_iters = power_iters
self._eps = eps
self._dim = dim
self._dtype = dtype
self._weight_shape = list(weight_shape)
h = self._weight_shape[self._dim]
w = np.prod(self._weight_shape) // h
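        # u (length h) and v (length w) hold the running singular-vector estimates used
        # by the power iteration; they are buffers, so gradients are stopped below.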
self.weight_u = self.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.weight_u.stop_gradient = True
self.weight_v = self.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.weight_v.stop_gradient = True
def forward(self, weight):
check_variable_and_dtype(weight, "weight", ['float32', 'float64'],
'SpectralNorm')
inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": self._dim,
"power_iters": self._power_iters,
"eps": self._eps,
})
return out
class TreeConv(layers.Layer):
"""
This interface is used to construct a callable object of the ``TreeConv`` class.
For more details, refer to code examples.
Tree-Based Convolution is a kind of convolution based on tree structure.
Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),
which is used to classify tree structures, such as Abstract Syntax Tree.
Tree-Based Convolution proposed a kind of data structure called continuous binary tree,
which regards multiway tree as binary tree.
The paper of Tree-Based Convolution Operator is here: `tree-based convolution <https://arxiv.org/abs/1409.5718v1/>`_ .
Parameters:
feature_size(int): last dimension of nodes_vector.
output_size(int): output feature width.
num_filters(int, optional): number of filters, Default: 1.
max_depth(int, optional): max depth of filters, Default: 2.
act(str, optional): activation function, Default: tanh.
param_attr(ParamAttr, optional): the parameter attribute for the filters, Default: None.
bias_attr(ParamAttr, optional): the parameter attribute for the bias of this layer, Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')
edge_set = numpy.random.random((1, 9, 2)).astype('int32')
treeConv = fluid.dygraph.nn.TreeConv(
feature_size=5, output_size=6, num_filters=1, max_depth=2)
ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))
"""
def __init__(self,
feature_size,
output_size,
num_filters=1,
max_depth=2,
act='tanh',
param_attr=None,
bias_attr=None,
name=None,
dtype='float32'):
super(TreeConv, self).__init__()
self._name = name
self._feature_size = feature_size
self._output_size = output_size
self._act = act
self._max_depth = max_depth
self._num_filters = num_filters
self._bias_attr = bias_attr
self._param_attr = param_attr
self._dtype = dtype
w_shape = [self._feature_size, 3, self._output_size, self._num_filters]
if self._bias_attr:
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=w_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, nodes_vector, edge_set):
check_type(nodes_vector, 'nodes_vector', (Variable), 'TreeConv')
check_type(edge_set, 'edge_set', (Variable), 'TreeConv')
if self._name:
out = self.create_variable(
name=self._name, dtype=self._dtype, persistable=False)
else:
out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='tree_conv',
inputs={
'NodesVector': nodes_vector,
'EdgeSet': edge_set,
'Filter': self.weight
},
outputs={'Out': out, },
attrs={'max_depth': self._max_depth})
if self._bias_attr:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [out],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={'axis': 1})
else:
pre_activation = out
return self._helper.append_activation(pre_activation, act=self._act)
class Flatten(layers.Layer):
"""
:alias_main: paddle.nn.Flatten
:alias: paddle.nn.Flatten,paddle.nn.layer.Flatten,paddle.nn.layer.common.Flatten
This interface is used to construct a callable object of the ``FLatten`` class.
For more details, refer to code examples.
    It flattens a contiguous range of axes of the input tensor into a single axis.
Parameters:
start_axis(int): first dim to flatten (default = 1)
stop_axis(int): last dim to flatten (default = -1).
Returns:
None
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
inp_np = paddle.to_tensor(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
flatten_res = flatten(inp_np)
"""
def __init__(self, start_axis=1, stop_axis=-1):
super(Flatten, self).__init__()
self.start_axis = start_axis
self.stop_axis = stop_axis
def forward(self, input):
out = paddle.tensor.manipulation.flatten(
input, start_axis=self.start_axis, stop_axis=self.stop_axis)
return out
|
the-stack_0_9678 | import os
os.environ['DGLBACKEND'] = 'mxnet'
import mxnet as mx
from mxnet import nd, gluon, autograd
import dgl
import numpy as np
import pandas as pd
import time
import logging
import pickle
import math
from estimator_fns import *
from graph import *
from data import *
from utils import *
from model.mxnet import *
from sampler import *
def normalize(feature_matrix):
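    # Column-wise z-score normalization: subtract each feature's mean and divide by
    # its (population) standard deviation.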
mean = nd.mean(feature_matrix, axis=0)
stdev = nd.sqrt(nd.sum((feature_matrix - mean)**2, axis=0)/feature_matrix.shape[0])
return (feature_matrix - mean) / stdev
def get_dataloader(data_size, batch_size, mini_batch=True):
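    # Batch samplers over node indices: shuffled batches for training, sequential
    # batches for evaluation; 'keep' retains the final incomplete batch.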
batch_size = batch_size if mini_batch else data_size
train_dataloader = gluon.data.BatchSampler(gluon.data.RandomSampler(data_size), batch_size, 'keep')
test_dataloader = gluon.data.BatchSampler(gluon.data.SequentialSampler(data_size), batch_size, 'keep')
return train_dataloader, test_dataloader
def train(model, trainer, loss, features, labels, train_loader, test_loader, train_g, test_g, train_mask, valid_mask, test_mask, ctx, n_epochs, batch_size, output_dir, thresh, scale_pos_weight, compute_metrics=True, mini_batch=True):
duration = []
for epoch in range(n_epochs):
tic = time.time()
loss_val = 0.
for n, batch in enumerate(train_loader):
# logging.info("Iteration: {:05d}".format(n))
node_flow, batch_nids = train_g.sample_block(nd.array(batch).astype('int64'))
batch_indices = nd.array(batch, ctx=ctx)
with autograd.record():
pred = model(node_flow, features[batch_nids.as_in_context(ctx)])
l = loss(pred, labels[batch_indices], mx.nd.expand_dims(scale_pos_weight*train_mask, 1)[batch_indices])
l = l.sum()/len(batch)
l.backward()
trainer.step(batch_size=1, ignore_stale_grad=True)
loss_val += l.asscalar()
# logging.info("Current loss {:04f}".format(loss_val/(n+1)))
duration.append(time.time() - tic)
train_metric, valid_metric = evaluate(model, train_g, features, labels, train_mask, valid_mask, ctx, batch_size, mini_batch)
logging.info("Epoch {:05d} | Time(s) {:.4f} | Training Loss {:.4f} | Training F1 {:.4f} | Validation F1 {:.4f}".format(
epoch, np.mean(duration), loss_val/(n+1), train_metric, valid_metric))
class_preds, pred_proba = get_model_class_predictions(model, test_g, test_loader, features, ctx, threshold=thresh)
if compute_metrics:
acc, f1, p, r, roc, pr, ap, cm = get_metrics(class_preds, pred_proba, labels, test_mask, output_dir)
logging.info("Metrics")
logging.info("""Confusion Matrix:
{}
f1: {:.4f}, precision: {:.4f}, recall: {:.4f}, acc: {:.4f}, roc: {:.4f}, pr: {:.4f}, ap: {:.4f}
""".format(cm, f1, p, r, acc, roc, pr, ap))
return model, class_preds, pred_proba
def evaluate(model, g, features, labels, train_mask, valid_mask, ctx, batch_size, mini_batch=True):
train_f1, valid_f1 = mx.metric.F1(), mx.metric.F1()
preds = []
batch_size = batch_size if mini_batch else features.shape[0]
dataloader = gluon.data.BatchSampler(gluon.data.SequentialSampler(features.shape[0]), batch_size, 'keep')
for batch in dataloader:
node_flow, batch_nids = g.sample_block(nd.array(batch).astype('int64'))
preds.append(model(node_flow, features[batch_nids.as_in_context(ctx)]))
nd.waitall()
# preds = nd.concat(*preds, dim=0).argmax(axis=1)
preds = nd.concat(*preds, dim=0)
train_mask = nd.array(np.where(train_mask.asnumpy()), ctx=ctx)
valid_mask = nd.array(np.where(valid_mask.asnumpy()), ctx=ctx)
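    # reshape(-3, 0) uses MXNet's special reshape codes: -3 merges the first two axes
    # (the leading singleton from np.where and the node axis), 0 keeps the class axis.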
train_f1.update(preds=nd.softmax(preds[train_mask], axis=1).reshape(-3, 0), labels=labels[train_mask].reshape(-1,))
valid_f1.update(preds=nd.softmax(preds[valid_mask], axis=1).reshape(-3, 0), labels=labels[valid_mask].reshape(-1,))
return train_f1.get()[1], valid_f1.get()[1]
def get_model_predictions(model, g, dataloader, features, ctx):
pred = []
for batch in dataloader:
node_flow, batch_nids = g.sample_block(nd.array(batch).astype('int64'))
pred.append(model(node_flow, features[batch_nids.as_in_context(ctx)]))
nd.waitall()
return nd.concat(*pred, dim=0)
def get_model_class_predictions(model, g, dataloader, features, ctx, threshold=None):
    unnormalized_preds = get_model_predictions(model, g, dataloader, features, ctx)
pred_proba = nd.softmax(unnormalized_preds)[:, 1].asnumpy().flatten()
if not threshold:
return unnormalized_preds.argmax(axis=1).asnumpy().flatten().astype(int), pred_proba
return np.where(pred_proba > threshold, 1, 0), pred_proba
def save_prediction(pred, pred_proba, id_to_node, training_dir, new_accounts, output_dir, predictions_file):
prediction_query = read_masked_nodes(os.path.join(training_dir, new_accounts))
pred_indices = np.array([id_to_node[query] for query in prediction_query])
pd.DataFrame.from_dict({'target': prediction_query,
'pred_proba': pred_proba[pred_indices],
'pred': pred[pred_indices]}).to_csv(os.path.join(output_dir, predictions_file),
index=False)
def save_model(g, model, model_dir, hyperparams):
model.save_parameters(os.path.join(model_dir, 'model.params'))
with open(os.path.join(model_dir, 'model_hyperparams.pkl'), 'wb') as f:
pickle.dump(hyperparams, f)
with open(os.path.join(model_dir, 'graph.pkl'), 'wb') as f:
pickle.dump(g, f)
def get_model(g, hyperparams, in_feats, n_classes, ctx, model_dir=None):
if model_dir: # load using saved model state
with open(os.path.join(model_dir, 'model_hyperparams.pkl'), 'rb') as f:
hyperparams = pickle.load(f)
with open(os.path.join(model_dir, 'graph.pkl'), 'rb') as f:
g = pickle.load(f)
if hyperparams['heterogeneous']:
model = HeteroRGCN(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
hyperparams['embedding_size'],
ctx)
else:
if hyperparams['model'] == 'gcn':
model = GCN(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
nd.relu,
hyperparams['dropout'])
elif hyperparams['model'] == 'graphsage':
model = GraphSAGE(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
nd.relu,
hyperparams['dropout'],
hyperparams['aggregator_type'])
else:
heads = ([hyperparams['num_heads']] * hyperparams['n_layers']) + [hyperparams['num_out_heads']]
model = GAT(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
heads,
gluon.nn.Lambda(lambda data: nd.LeakyReLU(data, act_type='elu')),
hyperparams['dropout'],
hyperparams['attn_drop'],
hyperparams['alpha'],
hyperparams['residual'])
if hyperparams['no_features']:
model = NodeEmbeddingGNN(model, in_feats, hyperparams['embedding_size'])
if model_dir:
model.load_parameters(os.path.join(model_dir, 'model.params'))
else:
model.initialize(ctx=ctx)
return model
if __name__ == '__main__':
logging = get_logger(__name__)
logging.info('numpy version:{} MXNet version:{} DGL version:{}'.format(np.__version__,
mx.__version__,
dgl.__version__))
args = parse_args()
args.edges = get_edgelists(args.edges, args.training_dir)
g, features, id_to_node = construct_graph(args.training_dir, args.edges, args.nodes, args.target_ntype,
args.heterogeneous)
features = normalize(nd.array(features))
if args.heterogeneous:
g.nodes['target'].data['features'] = features
else:
g.ndata['features'] = features
logging.info("Getting labels")
n_nodes = g.number_of_nodes('target') if args.heterogeneous else g.number_of_nodes()
labels, train_mask, valid_mask, test_mask = get_labels(
id_to_node,
n_nodes,
args.target_ntype,
os.path.join(args.training_dir, args.labels),
os.path.join(args.training_dir, args.validation_data),
os.path.join(args.training_dir, args.new_accounts),
)
logging.info("Got labels")
labels = nd.array(labels).astype('float32')
train_mask = nd.array(train_mask).astype('float32')
valid_mask = nd.array(valid_mask).astype('float32')
test_mask = nd.array(test_mask).astype('float32')
n_nodes = sum([g.number_of_nodes(n_type) for n_type in g.ntypes]) if args.heterogeneous else g.number_of_nodes()
n_edges = sum([g.number_of_edges(e_type) for e_type in g.etypes]) if args.heterogeneous else g.number_of_edges()
logging.info("""----Data statistics------'
#Nodes: {}
#Edges: {}
#Features Shape: {}
#Labeled Train samples: {}
#Unlabeled Test samples: {}""".format(n_nodes,
n_edges,
features.shape,
train_mask.sum().asscalar(),
test_mask.sum().asscalar()))
if args.num_gpus:
cuda = True
ctx = mx.gpu(0)
else:
cuda = False
ctx = mx.cpu(0)
logging.info("Initializing Model")
in_feats = args.embedding_size if args.no_features else features.shape[1]
n_classes = 2
model = get_model(g, vars(args), in_feats, n_classes, ctx)
logging.info("Initialized Model")
if args.no_features:
features = nd.array(g.nodes('target'), ctx) if args.heterogeneous else nd.array(g.nodes(), ctx)
else:
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
valid_mask = valid_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
if not args.heterogeneous:
# normalization
degs = g.in_degrees().astype('float32')
norm = mx.nd.power(degs, -0.5)
if cuda:
norm = norm.as_in_context(ctx)
g.ndata['norm'] = mx.nd.expand_dims(norm, 1)
if args.mini_batch:
train_g = HeteroGraphNeighborSampler(g, 'target', args.n_layers, args.n_neighbors) if args.heterogeneous\
else NeighborSampler(g, args.n_layers, args.n_neighbors)
test_g = HeteroGraphNeighborSampler(g, 'target', args.n_layers) if args.heterogeneous\
else NeighborSampler(g, args.n_layers)
else:
train_g, test_g = FullGraphSampler(g, args.n_layers), FullGraphSampler(g, args.n_layers)
train_data, test_data = get_dataloader(features.shape[0], args.batch_size, args.mini_batch)
loss = gluon.loss.SoftmaxCELoss()
scale_pos_weight = nd.sqrt((train_mask.shape[0] - train_mask.sum()) / train_mask.sum())
logging.info(model)
logging.info(model.collect_params())
trainer = gluon.Trainer(model.collect_params(), args.optimizer, {'learning_rate': args.lr, 'wd': args.weight_decay})
logging.info("Starting Model training")
model, pred, pred_proba = train(model, trainer, loss, features, labels, train_data, test_data, train_g, test_g,
train_mask, valid_mask, test_mask, ctx, args.n_epochs, args.batch_size, args.output_dir,
args.threshold, scale_pos_weight, args.compute_metrics, args.mini_batch)
logging.info("Finished Model training")
logging.info("Saving model")
save_model(g, model, args.model_dir, vars(args))
logging.info("Saving model predictions for new accounts")
save_prediction(pred, pred_proba, id_to_node, args.training_dir, args.new_accounts, args.output_dir, args.predictions)
|
the-stack_0_9679 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django_fsm import transition, RETURN_VALUE
from shop.models.order import BaseOrder, OrderModel
from .base import PaymentProvider
class ForwardFundPayment(PaymentProvider):
"""
Provides a simple prepayment payment provider.
"""
namespace = 'forward-fund-payment'
def get_payment_request(self, cart, request):
order = OrderModel.objects.create_from_cart(cart, request)
order.populate_from_cart(cart, request)
if order.total == 0:
order.no_payment_required()
else:
order.awaiting_payment()
order.save()
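        # The returned snippet is evaluated on the client (AngularJS $window) to
        # redirect the customer to the "thank you" page for the latest order.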
thank_you_url = OrderModel.objects.get_latest_url()
return '$window.location.href="{}";'.format(thank_you_url)
class ManualPaymentWorkflowMixin(object):
"""
Add this class to `settings.SHOP_ORDER_WORKFLOWS` to mix it into your `OrderModel`.
It adds all the methods required for state transitions, when used with the
`ForwardFundPayment` provider from above.
"""
TRANSITION_TARGETS = {
'awaiting_payment': _("Awaiting a forward fund payment"),
'prepayment_deposited': _("Prepayment deposited"),
'no_payment_required': _("No Payment Required"),
}
def __init__(self, *args, **kwargs):
if not isinstance(self, BaseOrder):
raise ImproperlyConfigured("class 'ManualPaymentWorkflowMixin' is not of type 'BaseOrder'")
CancelOrderWorkflowMixin.CANCELABLE_SOURCES.update(['awaiting_payment', 'prepayment_deposited',
'no_payment_required'])
super(ManualPaymentWorkflowMixin, self).__init__(*args, **kwargs)
def is_fully_paid(self):
return super(ManualPaymentWorkflowMixin, self).is_fully_paid()
@transition(field='status', source=['created'], target='no_payment_required')
def no_payment_required(self):
"""
Signals that an Order can proceed directly, by confirming a payment of value zero.
"""
@transition(field='status', source=['created'], target='awaiting_payment')
def awaiting_payment(self):
"""
Signals that the current Order awaits a payment.
Invoked by ForwardFundPayment.get_payment_request.
"""
def deposited_too_little(self):
return self.amount_paid > 0 and self.amount_paid < self.total
@transition(field='status', source=['awaiting_payment'], target='awaiting_payment',
conditions=[deposited_too_little], custom=dict(admin=True, button_name=_("Deposited too little")))
def prepayment_partially_deposited(self):
"""
Signals that the current Order received a payment, which was not enough.
"""
@transition(field='status', source=['awaiting_payment'], target='prepayment_deposited',
conditions=[is_fully_paid], custom=dict(admin=True, button_name=_("Mark as Paid")))
def prepayment_fully_deposited(self):
"""
Signals that the current Order received a payment, which fully covers the requested sum.
"""
@transition(field='status', source=['prepayment_deposited', 'no_payment_required'],
custom=dict(auto=True))
def acknowledge_prepayment(self):
"""
Acknowledge the payment. This method is invoked automatically.
"""
self.acknowledge_payment()
@transition(field='status', source='refund_payment', target=RETURN_VALUE('refund_payment', 'order_canceled'),
custom=dict(admin=True, button_name=_("Mark as Refunded")))
def payment_refunded(self):
"""
Signals that the payment for this Order has been refunded manually.
"""
return 'refund_payment' if self.amount_paid else 'order_canceled'
class CancelOrderWorkflowMixin(object):
"""
Add this class to `settings.SHOP_ORDER_WORKFLOWS` to mix it into your `OrderModel`.
It adds all the methods required for state transitions, to cancel an order.
"""
CANCELABLE_SOURCES = {'new', 'created', 'payment_confirmed', 'payment_declined'}
TRANSITION_TARGETS = {
'refund_payment': _("Refund payment"),
'order_canceled': _("Order Canceled"),
}
def cancelable(self):
return self.status in self.CANCELABLE_SOURCES
@transition(field='status', target=RETURN_VALUE(*TRANSITION_TARGETS.keys()),
conditions=[cancelable], custom=dict(admin=True, button_name=_("Cancel Order")))
def cancel_order(self):
"""
Signals that an Order shall be canceled.
"""
if self.amount_paid:
self.refund_payment()
return 'refund_payment' if self.amount_paid else 'order_canceled'
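# Example wiring (a sketch, not taken from this project): both mixins above are meant
# to be listed in ``SHOP_ORDER_WORKFLOWS`` so they get mixed into the concrete
# OrderModel; the dotted paths below are assumptions and must match wherever these
# classes actually live in your installation.
#
#   SHOP_ORDER_WORKFLOWS = [
#       'myshop.payment.ManualPaymentWorkflowMixin',
#       'myshop.payment.CancelOrderWorkflowMixin',
#   ]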
|
the-stack_0_9680 | """
Rate expressions only for A+B=R (ABtoR)
"""
import numpy as np
from pmutt import constants as c
species_names = ['A', 'B', 'R']
#%% Define the form of the rate constant
class RateConstant():
def __init__(self, name = 'k'):
self.name = name
def value(self, para_dict, temperature=None, energy_unit='eV'):
if temperature is None:
k_value = para_dict[self.name] # input is log10(prefactor)
else:
            # kb must be expressed in the same energy unit as Ea (J, kJ, cal, kcal, eV, etc.)
kb_unit = energy_unit + '/K'
prefactor = para_dict[self.name+'_prefactor']
Ea = 10**(para_dict[self.name+'_Ea']) # input is log10(Ea)
k_value = prefactor * np.exp(-Ea/c.kb(kb_unit)/temperature)
return k_value
#%% Define all groups in the table as dictionaries
#%%
# Driving force group (DFG)
def driving_suface_reaction_controlling(concentrations, para_dict, temperature=None):
K = RateConstant('K').value(para_dict, temperature)
return concentrations[0]*concentrations[1] - concentrations[2]/K
def driving_adsorption_controlling_w_dissociation(concentrations, para_dict, temperature=None):
K = RateConstant('K').value(para_dict, temperature)
return concentrations[0] - concentrations[2]/concentrations[1]/K
driving_force_groups = {'surface reaction controlling': driving_suface_reaction_controlling,
'adsorption controlling': driving_adsorption_controlling_w_dissociation}
#%%
# Kinetic group
def kinetic_suface_reaction_controlling(para_dict, temperature=None):
ksr = RateConstant('ksr').value(para_dict, temperature)
KA = RateConstant('KA').value(para_dict, temperature)
KB = RateConstant('KB').value(para_dict, temperature)
return ksr*KA*KB
def kinetic_adsorption_controlling_w_dissociation(para_dict, species = 'A', temperature=None):
KA = RateConstant('K'+species).value(para_dict, temperature)
return KA
kinetic_groups = {'surface reaction controlling': kinetic_suface_reaction_controlling,
'adsorption controlling with dissociation': kinetic_adsorption_controlling_w_dissociation}
#%%
# Adsorption group
def adsorption_default(concentrations, para_dict, species = 'A', temperature=None):
Kx = RateConstant('K'+species).value(para_dict, temperature)
return Kx*concentrations[species_names.index(species)]
def adsorption_equilirium_w_dissociation(concentrations, para_dict, species = 'A', temperature=None):
Kx = RateConstant('K'+species).value(para_dict, temperature)
return np.sqrt(Kx*concentrations[species_names.index(species)])
def adsorption_controlling_w_dissociation(concentrations, para_dict, species = 'A', temperature=None):
Kx = RateConstant('K'+species).value(para_dict, temperature)
K = RateConstant('K').value(para_dict, temperature)
return np.sqrt(Kx*concentrations[species_names.index('R')]/K/concentrations[species_names.index('B')])
adsorption_groups = {'adsorption default': adsorption_default,
'adsorption equilirium with dissociation': adsorption_equilirium_w_dissociation,
'adsorption controlling with dissociation': adsorption_controlling_w_dissociation}
# Exponents of adsorption groups
exponents = {'surface reaction controlling': {'dissociation': 3},
'adsorption controlling with dissociation': 2}
#%% Define the rate expressions
# General rate expression
def general_rate(concentrations, para_dict, stoichiometry=None, name=None, temperature=None):
"""Rate expressions from Yang and Hougen
"""
controling_key = 'surface reaction controlling'
ads_key = 'adsorption equilirium with dissociation'
surface_reaction_key = 'dissociation'
adsorption_terms = (1 + adsorption_groups[ads_key](concentrations, para_dict, 'A', temperature) + \
adsorption_groups[ads_key](concentrations, para_dict, 'B', temperature))**exponents[controling_key][surface_reaction_key]
rate = driving_force_groups[controling_key](concentrations, para_dict, temperature) * \
kinetic_groups[controling_key](para_dict, temperature)/adsorption_terms
return rate
def general_rate_ads(concentrations, para_dict, stoichiometry=None, name=None, temperature=None):
"""Rate expressions from Yang and Hougen
"""
controling_key = 'adsorption controlling'
ads_key = 'adsorption controlling with dissociation'
#surface_reaction_key = 'dissociation'
adsorption_terms = (1 + adsorption_groups[ads_key](concentrations, para_dict, 'A', temperature) + \
adsorption_groups['adsorption default'](concentrations, para_dict, 'B', temperature))**exponents[ads_key]
rate = driving_force_groups[controling_key](concentrations, para_dict, temperature) * \
kinetic_groups[ads_key](para_dict, temperature)/adsorption_terms
return rate
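# Minimal usage sketch (an addition, not part of the original module). The rate
# constant values below are made-up placeholders, only meant to exercise the call
# signature with temperature=None.
if __name__ == '__main__':
    example_para_dict = {'ksr': 1.0, 'KA': 2.0, 'KB': 0.5, 'K': 10.0}
    example_concentrations = np.array([1.0, 1.0, 0.1])  # [A], [B], [R]
    print(general_rate(example_concentrations, example_para_dict))
    print(general_rate_ads(example_concentrations, example_para_dict))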
|
the-stack_0_9681 | """
NOTE: This file is not using to.testing.assert_allclose because most methods need to work for both torch and numpy.
"""
import pytest
import numpy as np
import torch as to
import itertools
import pickle
from typing import NamedTuple
from pyrado.algorithms.utils import ReplayMemory
from pyrado.sampling.step_sequence import StepSequence
from pyrado.sampling.data_format import to_format
from pyrado.sampling.step_sequence import discounted_value, gae_returns
from pyrado.sampling.rollout import rollout
from pyrado.environments.pysim.ball_on_beam import BallOnBeamSim
rewards = [
-200,
-100,
-50,
-25,
-17.5,
]
# Observations has one additional element
observations = [
np.array([3, 2, 7]),
np.array([3, 1, 7]),
np.array([2, 0, 7]),
np.array([3, 1, 3]),
np.array([0, 2, 4]),
np.array([1, 1, 1]),
]
# Actions come from PyTorch
actions = [
to.tensor([0, 1]),
to.tensor([0, 3]),
to.tensor([2, 4]),
to.tensor([3, 1]),
to.tensor([0, 0]),
]
# Policy infos as dict collapse test
policy_infos = [
{'mean': np.array([0, 1]), 'std': 0.4},
{'mean': np.array([0, 3]), 'std': 0.2},
{'mean': np.array([2, 4]), 'std': 0.1},
{'mean': np.array([3, 1]), 'std': 0.05},
{'mean': np.array([0, 0]), 'std': 0.025},
]
# Hidden is a tuple, like we see with LSTMs
hidden = [
(np.array([3, 2, 7]), np.array([2, 1])),
(np.array([4, 9, 8]), np.array([5, 6])),
(np.array([1, 4, 9]), np.array([7, 3])),
(np.array([0, 8, 2]), np.array([4, 9])),
(np.array([2, 7, 6]), np.array([8, 0])),
]
def test_create_rew_only():
# Don't require additional fields for this test
StepSequence.required_fields = {}
ro = StepSequence(rewards=rewards, data_format='numpy')
assert len(ro) == 5
assert (ro.rewards == np.array(rewards)).all()
@pytest.mark.parametrize(
'data_format, tensor_type', [('numpy', np.ndarray), ('torch', to.Tensor)], ids=['numpy', 'torch']
)
def test_create(data_format, tensor_type):
# With actions, observations and dicts
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
assert len(ro) == 5
assert isinstance(ro.rewards, tensor_type)
assert isinstance(ro.observations, tensor_type)
assert isinstance(ro.actions, tensor_type)
assert isinstance(ro.policy_infos['mean'], tensor_type)
assert isinstance(ro.policy_infos['std'], tensor_type)
assert isinstance(ro.hidden[0], tensor_type)
# Done should always be a ndarray
assert isinstance(ro.done, np.ndarray)
assert not ro.done[:-1].any()
assert ro.done[-1]
@pytest.mark.parametrize(
'other_format, tensor_type', [('torch', np.ndarray), ('numpy', to.Tensor)],
ids=['numpy to torch', 'torch to numpy']
)
def test_convert(other_format, tensor_type):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=other_format)
# convert
if other_format == 'numpy':
ro.torch()
elif other_format == 'torch':
ro.numpy()
# Verify
assert isinstance(ro.rewards, tensor_type)
assert isinstance(ro.observations, tensor_type)
assert isinstance(ro.actions, tensor_type)
assert isinstance(ro.policy_infos['mean'], tensor_type)
assert isinstance(ro.policy_infos['std'], tensor_type)
assert isinstance(ro.hidden[0], tensor_type)
# Done should always be a ndarray
assert isinstance(ro.done, np.ndarray)
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_step_iter(data_format):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
assert len(ro) == 5
for i, step in enumerate(ro):
assert step.reward == rewards[i]
# Check current and next
assert (step.observation == to_format(observations[i], data_format)).all()
assert (step.next_observation == to_format(observations[i + 1], data_format)).all()
# Check dict sub element
assert (step.policy_info.mean == to_format(policy_infos[i]['mean'], data_format)).all()
assert (step.hidden[0] == to_format(hidden[i][0], data_format)).all()
@pytest.mark.parametrize(
'sls', [slice(2, 4), slice(2, 5, 2), slice(3), slice(4, None)]
)
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_slice(sls, data_format):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
# Slice rollout
sliced = ro[sls]
# Slice reward list for verification
sliced_rew = rewards[sls]
for i, step in enumerate(sliced):
assert step.reward == sliced_rew[i]
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_add_data(data_format):
ro = StepSequence(
rewards=rewards,
observations=observations,
actions=actions,
policy_infos=policy_infos,
hidden=hidden,
data_format=data_format
)
# Add a data field
ro.add_data('return', discounted_value(ro, 0.9))
assert hasattr(ro, 'return')
# Query new data field from steps
assert abs(ro[2]['return'] - -86.675) < 0.01
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_concat(data_format):
# Create some rollouts with random rewards
ros = [
StepSequence(
rewards=np.random.randn(5),
observations=np.random.randn(6),
actions=np.random.randn(5),
policy_infos={'mean': np.random.randn(5)},
hidden=(np.random.randn(5), np.random.randn(5)),
data_format=data_format
),
StepSequence(
rewards=np.random.randn(5),
observations=np.random.randn(6),
actions=np.random.randn(5),
policy_infos={'mean': np.random.randn(5)},
hidden=(np.random.randn(5), np.random.randn(5)),
data_format=data_format
)
]
# Perform concatenation
cat = StepSequence.concat(ros)
assert cat.continuous
assert cat.rollout_count == 2
# Check steps
for step_ro, step_cat in zip(itertools.chain.from_iterable(ros), cat):
assert step_ro.reward == step_cat.reward
assert step_ro.observation == step_cat.observation
assert step_ro.done == step_cat.done
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_split_multi(data_format):
# Don't require additional fields for this test
StepSequence.required_fields = {}
ro = StepSequence(
rewards=np.arange(20),
rollout_bounds=[0, 4, 11, 17, 20],
data_format=data_format
)
# There should be four parts
assert ro.rollout_count == 4
# Of these sizes
assert list(ro.rollout_lengths) == [4, 7, 6, 3]
# Test selecting one
s1 = ro.get_rollout(1)
assert s1.rollout_count == 1
assert s1[0].reward == ro[4].reward
# Test selecting a slice
s2 = ro.get_rollout(slice(1, -1))
assert s2.rollout_count == 2
assert s2[0].reward == ro[4].reward
assert s2[7].reward == ro[11].reward
# Test selecting by list
s2 = ro.get_rollout([1, 3])
assert s2.rollout_count == 2
assert s2[0].reward == ro[4].reward
assert s2[7].reward == ro[17].reward
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_pickle(data_format):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
# Pickle/unpickle
ro2 = pickle.loads(pickle.dumps(ro, pickle.HIGHEST_PROTOCOL))
for step, step_pi in zip(ro, ro2):
assert step.reward == step_pi.reward
assert (step.observation == step_pi.observation).all()
assert (step.action == step_pi.action).all()
assert step.done == step_pi.done
@pytest.mark.parametrize(
'env', [
BallOnBeamSim(dt=0.01, max_steps=200),
], ids=['bob_linpol']
)
def test_advantage_calculation(env, linear_policy):
ro = rollout(env, linear_policy)
gamma = 0.99
lamb = 0.95
# Add dummy values
values = np.ones_like(ro.rewards)
if not ro.done[-1]:
        values = np.concatenate([values, [0.]])  # append a zero bootstrap value for the final step
ro.add_data('values', values)
gae1 = gae_returns(ro, gamma, lamb)
# Compute the advantages
gae2 = np.empty_like(values)
for k in reversed(range(ro.length)):
if ro[k].done:
gae2[k] = ro[k].reward - values[k]
else:
gae2[k] = ro[k].reward + gamma*values[k + 1] - values[k] + \
gamma*lamb*gae2[k + 1]
assert (gae1 == gae2).all()
@pytest.mark.replay
@pytest.mark.parametrize(
'capacity', [
1, 2, 8,
], ids=['1', '2', '8']
)
def test_replay_memory(capacity):
rm = ReplayMemory(capacity)
# Create fake rollouts (of length 5)
ro1 = StepSequence(rewards=rewards, observations=observations, actions=actions, hidden=hidden)
ro2 = StepSequence(rewards=rewards, observations=observations, actions=actions, hidden=hidden)
# Concatenate them for testing only
ros = StepSequence.concat([ro1, ro2], truncate_last=True) # same truncate_last behavior as push function
# Check the lengths
rm.push(ro1)
assert len(rm) == len(ro1) or len(rm) == capacity
rm.push(ro2)
    assert len(rm) == len(ro1) + len(ro2) or len(rm) == capacity
# Check the elements
shift = len(ros) - capacity
if shift < len(ro1):
assert all(rm.memory.observations[0] == ros.observations[shift])
assert all(rm.memory.observations[-1] == ro2.observations[-2]) # -2 since one was truncated
# A dummy namedtuple for testing
class DummyNT(NamedTuple):
part1: to.Tensor
part2: to.Tensor
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_namedtuple(data_format):
hid_nt = [DummyNT(*it) for it in hidden]
ro = StepSequence(
rewards=rewards,
hidden=hid_nt,
data_format=data_format
)
assert isinstance(ro.hidden, DummyNT)
for i, step in enumerate(ro):
assert isinstance(step.hidden, DummyNT)
assert (step.hidden.part1 == to_format(hid_nt[i].part1, data_format)).all()
|
the-stack_0_9683 | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sanic import Blueprint
from sanic import response
from trial_rest_api.trial_common import transaction as trial_transaction
from trial_rest_api.consent_common import transaction as consent_transaction
from trial_rest_api import general, security_messaging
from trial_rest_api.errors import ApiBadRequest, ApiInternalError
INVESTIGATORS_BP = Blueprint('investigators')
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
# Used
@INVESTIGATORS_BP.get('investigators')
async def get_all_investigators(request):
"""Fetches complete details of all Accounts in state"""
client_key = general.get_request_key_header(request)
investigator_list = await security_messaging.get_investigators(request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN, client_key)
investigator_list_json = []
for address, dp in investigator_list.items():
investigator_list_json.append({
'public_key': dp.public_key,
'name': dp.name
})
return response.json(body={'data': investigator_list_json},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.post('investigators')
async def register_investigator(request):
"""Updates auth information for the authorized account"""
required_fields = ['name']
general.validate_fields(required_fields, request.json)
name = request.json.get('name')
clinic_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()
# Consent network
client_txn = consent_transaction.create_investigator_client(
txn_signer=clinic_signer,
batch_signer=clinic_signer
)
batch, batch_id = consent_transaction.make_batch_and_id([client_txn], clinic_signer)
await security_messaging.add_investigator(
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch])
try:
await security_messaging.check_batch_status(
request.app.config.CONSENT_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
# Trial network
clinic_txn = trial_transaction.create_investigator(
txn_signer=clinic_signer,
batch_signer=clinic_signer,
name=name
)
batch, batch_id = trial_transaction.make_batch_and_id([clinic_txn], clinic_signer)
await security_messaging.add_investigator(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.TIMEOUT,
[batch])
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.get('investigators/import_to_trial_data/<patient_pkey>/<ehr_id>')
async def import_screening_data(request, patient_pkey, ehr_id):
"""Updates auth information for the authorized account"""
res_json = general.get_response_from_ehr(request, "/ehrs/" + patient_pkey + "/" + ehr_id)
investigator_pkey = general.get_request_key_header(request)
client_signer = general.get_signer(request, investigator_pkey)
data_json = res_json['data']
if not data_json:
        raise ApiBadRequest("Cannot retrieve EHR '" + ehr_id + "' for patient '" + patient_pkey + "'")
data_txn = trial_transaction.add_data(
txn_signer=client_signer,
batch_signer=client_signer,
uid=data_json['id'],
height=data_json['height'],
weight=data_json['weight'],
a1c=data_json['A1C'],
fpg=data_json['FPG'],
ogtt=data_json['OGTT'],
rpgt=data_json['RPGT'],
event_time=data_json['event_time'])
batch, batch_id = trial_transaction.make_batch_and_id([data_txn], client_signer)
await security_messaging.import_screening_data(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], investigator_pkey)
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.get('investigators/data')
async def get_all_data_from_investigators(request):
"""Fetches complete details of all Accounts in state"""
client_key = general.get_request_key_header(request)
data_list = await security_messaging.get_data_from_investigators(request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN, client_key)
data_list_json = []
for address, data in data_list.items():
data_list_json.append({
'id': data.id,
'height': data.height,
'weight': data.weight,
'A1C': data.A1C,
'FPG': data.FPG,
'OGTT': data.OGTT,
'RPGT': data.RPGT,
'event_time': data.event_time,
'eligible': data.eligible
})
return response.json(body={'data': data_list_json},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.post('investigators/data/update')
async def update_data(request):
client_key = general.get_request_key_header(request)
required_fields = ['id', 'height', 'weight', 'A1C', 'FPG', 'OGTT', 'RPGT']
general.validate_fields(required_fields, request.json)
uid = request.json.get('id')
height = request.json.get('height')
weight = request.json.get('weight')
A1C = request.json.get('A1C')
FPG = request.json.get('FPG')
OGTT = request.json.get('OGTT')
RPGT = request.json.get('RPGT')
client_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()
client_txn = trial_transaction.update_data(
txn_signer=client_signer,
batch_signer=client_signer,
uid=uid,
height=height,
weight=weight,
a1c=A1C,
fpg=FPG,
ogtt=OGTT,
rpgt=RPGT)
batch, batch_id = trial_transaction.make_batch_and_id([client_txn], client_signer)
await security_messaging.update_investigator(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.get('investigators/request_inform_consent/<patient_pkey>')
async def request_inform_consent(request, patient_pkey):
"""Updates auth information for the authorized account"""
client_key = general.get_request_key_header(request)
client_signer = general.get_signer(request, client_key)
grant_read_ehr_permission_txn = consent_transaction.request_inform_document_consent(
txn_signer=client_signer,
batch_signer=client_signer,
patient_pkey=patient_pkey)
batch, batch_id = trial_transaction.make_batch_and_id([grant_read_ehr_permission_txn], client_signer)
await security_messaging.request_inform_document_consent(
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.CONSENT_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.post('investigators/data/eligible')
async def set_eligible(request):
client_key = general.get_request_key_header(request)
required_fields = ['id', 'eligible']
general.validate_fields(required_fields, request.json)
uid = request.json.get('id')
eligible = bool(request.json.get('eligible'))
client_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()
client_txn = trial_transaction.set_eligible(
txn_signer=client_signer,
batch_signer=client_signer,
uid=uid,
eligible=eligible)
batch, batch_id = trial_transaction.make_batch_and_id([client_txn], client_signer)
await security_messaging.set_eligible(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
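# Rough interaction sketch (illustrative only; host, port and the header that carries
# the client's public key depend on how this gateway is deployed, see
# general.get_request_key_header):
#
#   GET  /investigators                  -> list registered investigators
#   POST /investigators {"name": "..."}  -> register a new investigator
#   GET  /investigators/data             -> screening data visible to the caller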
|
the-stack_0_9684 | import collections as co
import itertools as it
#argument_parser = argparse.ArgumentParser()
#argument_parser.add_argument("jff_path", metavar="jff-file", type=str)
#args = argument_parser.parse_args()
_Original = co.namedtuple('_Original', ('symbol',))
_Term = co.namedtuple('_Term', ('symbol',))
_Bin = co.namedtuple('_Bin', ('string',))
_Start = co.namedtuple('_Start', ())
Grammar = co.namedtuple('Grammar', ('rules',))
def _parse_rule(rule):
tag, leaf, (
(left_tag, left_leaf, left),
(right_tag, right_leaf, right),
) = rule
assert tag == "production"
assert not leaf
assert left_tag == "left"
assert left_leaf
assert right_tag == "right"
assert right_leaf
return left, right
def parse(structure):
assert structure.type == "grammar"
rules = {}
for left, target in map(_parse_rule, structure.body):
assert len(left) == 1
if left not in rules:
rules[left] = set()
if target is None:
rules[left].add(())
continue
rules[left].add(tuple(target))
return rules
def _copy_rules(rules):
return {left: set(targets) for left, targets in rules.items()}
def _chomsky_normalize_rename(rules):
return {
("original", left): {
tuple(("original", symbol) for symbol in target)
for target in targets
}
for left, targets in rules.items()
}
def _chomsky_normalize_start(rules, start):
    # Introduce a dedicated start rule that derives the original start symbol.
    new_rules = _copy_rules(rules)
    new_rules[('start',)] = {(start,)}
    return new_rules
def _compute_symbols(rules):
symbols = set()
for source, targets in rules.items():
for target in targets:
symbols |= set(target)
return symbols
def _chomsky_normalize_term(rules):
new_rules = {
source: {
tuple(
("term", symbol) if symbol not in rules else symbol
for symbol in target
)
for target in targets
}
for source, targets in rules.items()
}
for symbol in _compute_symbols(rules) - set(rules.keys()):
new_rules[("term", symbol)] = {(symbol,)}
return new_rules
def _chomsky_normalize_bin(rules):
new_rules = {}
for source, targets in rules.items():
new_rules[source] = set()
for target in targets:
if len(target) <= 2:
new_rules[source].add(target)
continue
new_rules[source].add((target[0], ("bin", target[1:])))
for symbol_i, symbol in enumerate(target[1:-2], start=1):
new_rules["bin", target[symbol_i:]] = {
(symbol, ("bin", target[symbol_i + 1:]))
}
new_rules["bin", target[-2:]] = {target[-2:]}
return new_rules
def _inline_nullable(string, symbol):
if symbol not in string:
yield string
return
index = string.index(symbol)
for rest in _inline_nullable(string[index + 1:], symbol):
yield string[:index] + rest
yield string[: index + 1] + rest
def _chomsky_normalize_del(rules):
nullables = set()
new_nullables = True
while new_nullables:
new_nullables = False
for source, targets in rules.items():
if source in nullables:
continue
for target in targets:
nullable = True
for symbol in target:
if symbol not in nullables:
nullable = False
break
if nullable:
nullables.add(source)
new_nullables = True
break
new_rules = _copy_rules(rules)
for source, targets in rules.items():
for target in targets:
for nullable in set(target) & nullables:
for new_target in _inline_nullable(target, nullable):
new_rules[source].add(new_target)
for source in nullables:
new_rules[source].discard(())
return new_rules
def _chomsky_normalize_unit_for_symbol(rules, source, seen=set()):
for target in rules[source]:
if not (len(target) == 1 and target[0] in rules):
yield target
continue
for symbol in target:
if symbol in seen:
continue
yield from _chomsky_normalize_unit_for_symbol(
rules, symbol, seen | {source}
)
def _chomsky_normalize_unit(rules):
return {
source: set(_chomsky_normalize_unit_for_symbol(rules, source))
for source in rules
}
def _chomsky_normalize(rules, start):
return _chomsky_normalize_prettify(
_chomsky_normalize_unit(
_chomsky_normalize_del(
_chomsky_normalize_bin(
_chomsky_normalize_term(
_chomsky_normalize_start(
_chomsky_normalize_rename(rules), start
)
)
)
)
)
)
def _prettify_symbol(symbol):
symbol_type, *args = symbol
if symbol_type == "original":
return args[0]
elif symbol_type == "term":
return "T{}".format(_prettify_symbol(args[0]))
elif symbol_type == "start":
return "start"
elif symbol_type == "bin":
return tuple(map(_prettify_symbol, args[0]))
return symbol
def _chomsky_normalize_prettify(rules):
return {
_prettify_symbol(source): {
tuple(_prettify_symbol(symbol) for symbol in target)
for target in targets
}
for source, targets in rules.items()
}
def _cyk_products(rules, string):
singles = co.defaultdict(set)
pairs = co.defaultdict(set)
for source, targets in rules.items():
for target in targets:
(singles if len(target) == 1 else pairs)[source].add(
target
)
products = co.defaultdict(lambda: co.defaultdict(set))
for source, targets in singles.items():
for target in targets:
products[source][target].add(target)
for substring_length in range(2, len(string) + 1):
for position in range(len(string) - substring_length + 1):
substring = string[position: position + substring_length]
for split in range(1, substring_length):
left_string, right_string = (
substring[:split],
substring[split:],
)
for source, targets in pairs.items():
for left, right in targets:
if (
left_string in products[left]
and right_string in products[right]
):
for left_tree, right_tree in it.product(
products[left][left_string],
products[right][right_string],
):
products[source][substring].add(
(
(left, left_tree),
(right, right_tree),
)
)
return products
def _cyk(rules, string, start):
for tree in _cyk_products(rules, string)[start][string]:
yield start, tree
def _format_parse_tree_lines(tree):
node, children = tree
if len(children) == 1:
yield "{!r},".format((node, children))
return
node, (left, right) = tree
head = "({!r}, ".format(node)
left_lines = _format_parse_tree_lines(left)
yield head + next(left_lines)
for line in left_lines:
yield " " * len(head) + line
for line in _format_parse_tree_lines(right):
yield " " * len(head) + line
def _format_parse_tree(tree):
return "\n".join(_format_parse_tree_lines(tree))
def run(rules, string, start):
    # _cyk needs the normalized rules, the input string and the start symbol in
    # its prettified form (see _prettify_symbol).
    return _cyk(_chomsky_normalize(rules, start), string, _prettify_symbol(start))
#
#
# cnf = _chomsky_normalize(
# _parse_grammar(parser._parse_jff_structure(args.jff_path)),
# ("original", "S"),
# )
#
# for tree in _cyk(cnf, tuple("000#100"), "S"):
# print(_format_parse_tree(tree))
#
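# Self-contained sanity check with a tiny hand-written grammar. The grammar and the
# input string below are illustrative additions (not part of the original .jff
# workflow); rules map a left-hand symbol to a set of right-hand tuples, in the same
# shape that parse() produces.
if __name__ == "__main__":
    toy_rules = {"S": {("0", "S", "0"), ("1", "S", "1"), ("#",)}}
    for toy_tree in run(toy_rules, tuple("01#10"), ("original", "S")):
        print(_format_parse_tree(toy_tree))
        print()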
|
the-stack_0_9685 | #!/usr/bin/env python
"""
Table dependencies in an Oracle query
"""
import lib_oracle
import lib_common
import lib_sql
from sources_types.sql import query as sql_query
from sources_types.oracle import query as oracle_query
def Main():
# cgiEnv = lib_common.CgiEnv()
cgiEnv = lib_oracle.OracleEnv()
grph = cgiEnv.GetGraph()
sqlQuery = sql_query.GetEnvArgs(cgiEnv)
dbNam = cgiEnv.m_entity_id_dict["Db"]
# This is simply the user.
oraSchema = cgiEnv.OracleSchema()
nodeSqlQuery = oracle_query.MakeUri(sqlQuery,dbNam)
propSheetToQuery = lib_common.MakeProp("Table dependency")
list_of_table_names = lib_sql.TableDependencies(sqlQuery)
list_of_nodes = oracle_query.QueryToNodesList(sqlQuery,{"Db":dbNam },list_of_table_names,oraSchema)
for nodTab in list_of_nodes:
grph.add( ( nodeSqlQuery, propSheetToQuery, nodTab ) )
cgiEnv.OutCgiRdf()
if __name__ == '__main__':
Main()
|
the-stack_0_9688 | from decimal import Decimal
from typing import Optional, Dict, List
from django import forms
from django.utils.translation import gettext as _
from rest_framework.request import Request
from polaris.integrations import WithdrawalIntegration, calculate_fee
from polaris.models import Transaction, Asset
from polaris.sep10.token import SEP10Token
from polaris.templates import Template
from polaris.utils import getLogger
from ..forms import WithdrawForm
from .sep24_kyc import SEP24KYC
from ..models import PolarisStellarAccount, PolarisUserTransaction
logger = getLogger(__name__)
class MyWithdrawalIntegration(WithdrawalIntegration):
def form_for_transaction(
self,
request: Request,
transaction: Transaction,
post_data=None,
amount=None,
*args,
**kwargs,
) -> Optional[forms.Form]:
kyc_form, content = SEP24KYC.check_kyc(transaction, post_data)
if kyc_form:
return kyc_form
elif content or transaction.amount_in:
return None
elif post_data:
return WithdrawForm(transaction, post_data)
else:
return WithdrawForm(transaction, initial={"amount": amount})
def content_for_template(
self,
request: Request,
template: Template,
form: Optional[forms.Form] = None,
transaction: Optional[Transaction] = None,
*args,
**kwargs,
) -> Optional[Dict]:
na, content = SEP24KYC.check_kyc(transaction)
if content:
return content
elif template == Template.WITHDRAW:
if not form:
return None
return {
"title": _("Polaris Transaction Information"),
"icon_label": _("Stellar Development Foundation"),
"guidance": (
_(
"Please enter the banking details for the account "
"you would like to receive your funds."
)
),
}
else: # template == Template.MORE_INFO
return {
"title": _("Polaris Transaction Information"),
"icon_label": _("Stellar Development Foundation"),
}
def after_form_validation(
self,
request: Request,
form: forms.Form,
transaction: Transaction,
*args,
**kwargs,
):
try:
SEP24KYC.track_user_activity(form, transaction)
except RuntimeError:
# Since no polaris account exists for this transaction, KYCForm
# will be returned from the next form_for_transaction() call
logger.exception(
f"KYCForm was not served first for unknown account, id: "
f"{transaction.stellar_account}"
)
def process_sep6_request(
self,
token: SEP10Token,
request: Request,
params: Dict,
transaction: Transaction,
*args,
**kwargs,
) -> Dict:
account = (
PolarisStellarAccount.objects.filter(
account=params["account"],
memo=params["memo"],
memo_type=params["memo_type"],
)
.select_related("user")
.first()
)
if not account:
return {
"type": "non_interactive_customer_info_needed",
"fields": [
"first_name",
"last_name",
"email_address",
"bank_number",
"bank_account_number",
],
}
elif not (account.user.bank_account_number and account.user.bank_number):
return {
"type": "non_interactive_customer_info_needed",
"fields": ["bank_number", "bank_account_number"],
}
elif params["type"] != "bank_account":
raise ValueError(_("'type' must be 'bank_account'"))
elif not params["dest"]:
raise ValueError(_("'dest' is required"))
elif not params["dest_extra"]:
raise ValueError(_("'dest_extra' is required"))
elif not account.confirmed:
# Here is where you would normally return something like this:
# {
# "type": "customer_info_status",
# "status": "pending"
# }
# However, we're not going to block the client from completing
# the flow since this is a reference server.
pass
asset = params["asset"]
min_amount = round(asset.withdrawal_min_amount, asset.significant_decimals)
max_amount = round(asset.withdrawal_max_amount, asset.significant_decimals)
if params["amount"]:
if not (min_amount <= params["amount"] <= max_amount):
raise ValueError(_("invalid 'amount'"))
transaction.amount_in = params["amount"]
transaction.amount_fee = calculate_fee(
{
"amount": params["amount"],
"operation": "withdraw",
"asset_code": asset.code,
}
)
transaction.amount_out = round(
transaction.amount_in - transaction.amount_fee,
asset.significant_decimals,
)
transaction.save()
response = {
"account_id": asset.distribution_account,
"min_amount": min_amount,
"max_amount": max_amount,
"fee_fixed": round(asset.withdrawal_fee_fixed, asset.significant_decimals),
"fee_percent": asset.withdrawal_fee_percent,
}
if params["memo_type"] and params["memo"]:
response["memo_type"] = params["memo_type"]
response["memo"] = params["memo"]
PolarisUserTransaction.objects.create(
transaction_id=transaction.id, user=account.user, account=account
)
return response
def interactive_url(
self,
request: Request,
transaction: Transaction,
asset: Asset,
amount: Optional[Decimal],
callback: Optional[str],
*args: List,
**kwargs: Dict,
) -> Optional[str]:
raise NotImplementedError()
def save_sep9_fields(
self,
token: SEP10Token,
request: Request,
stellar_account: str,
fields: Dict,
language_code: str,
muxed_account: Optional[str] = None,
account_memo: Optional[str] = None,
account_memo_type: Optional[str] = None,
*args: List,
**kwargs: Dict,
):
raise NotImplementedError()
def patch_transaction(
self,
token: SEP10Token,
request: Request,
params: Dict,
transaction: Transaction,
*args: List,
**kwargs: Dict,
):
raise NotImplementedError()
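# How an integration class like this is typically registered (a sketch; verify the
# exact call against the installed django-polaris release -- the app and module
# names below are assumptions):
#
#   # myanchor/apps.py
#   from django.apps import AppConfig
#
#   class AnchorConfig(AppConfig):
#       name = "myanchor"
#
#       def ready(self):
#           from polaris.integrations import register_integrations
#           from .integrations import MyWithdrawalIntegration
#           register_integrations(withdrawal=MyWithdrawalIntegration())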
|
the-stack_0_9689 | import csv
import os.path
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
matplotlib.rcParams.update({'font.size': 15})
n_trial = 5
top_k = 1
batch_size = 4000
max_step = np.inf
max_reward = np.inf
min_reward = -np.inf
exp_name = 'CartpoleNd'
exp_param = 'D1K05A05Ec10'
extra_name = ''
prepath = "../" + exp_name + "/Data/AST/Lexington/" + exp_param
plot_path = "../" + exp_name + "/Data/Plot/top" + str(top_k) + "/"
policies = [
# "TRPO",\
"MCTSRS",\
# "MCTSAS",\
# "MCTSBV",\
# "GAP100T20K3Step1.0Fmean","GASMP100T20K3Step1.0Fmean",\
# "GAP500T20K3Step1.0Fmean","GASMP500T20K3Step1.0Fmean",\
]
plot_name = exp_name + '_' + exp_param + 'avgtop' + str(top_k) + 'trial' + str(n_trial) + extra_name
plts = []
legends = []
fig = plt.figure(figsize=(10, 10))
for (policy_index, policy) in enumerate(policies):
print(policy)
for trial in range(n_trial):
file_path = prepath + '/' + policy + '/' + str(trial) + '/process.csv'
if os.path.exists(file_path):
print(trial)
steps = []
rewards = []
with open(file_path) as csv_file:
                content = csv_file.read()
                if '\0' in content:
                    print("you have null bytes in your input file")
                csv_reader = csv.reader(content.replace('\0', '').splitlines())
for (i, row) in enumerate(csv_reader):
if i == 0:
entry_dict = {}
for index in range(len(row)):
entry_dict[row[index]] = index
else:
# print(row[entry_dict["StepNum"]])
if int(row[entry_dict["StepNum"]]) > max_step:
break
if int(row[entry_dict["StepNum"]]) % batch_size == 0:
steps.append(int(row[entry_dict["StepNum"]]))
avg_top = 0.0
for k in range(top_k):
avg_top += np.clip(float(row[entry_dict["reward " + str(k)]]), min_reward, max_reward)
avg_top /= top_k
rewards.append(avg_top)
plot, = plt.plot(steps, rewards)
# plot, = plt.plot(steps,np.mean(np.exp(Rewards),0))
# plot,_,_ = plt.errorbar(steps,np.mean(Rewards,0),yerr=np.std(Rewards,0)/np.sqrt(n_trial),errorevery=10)
plts.append(plot)
legends.append(policy + '_' + str(trial))
plt.legend(plts, legends)
plt.xlabel('Step Number')
plt.ylabel('Top ' + str(top_k) + ' Reward')
fig.savefig(plot_path + plot_name)
plt.close(fig)
|