ext | sha | content
---|---|---|
py | 1a383a8556a7c18573b1cfa0fe6ec5d272e6b3f8 | import math
from typing import Any
import torch
import torch.nn as nn
from torch.nn import functional as f
import numpy as np
BETA_START = 0.4
BETA_FRAMES = 100000
class NoisyLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
w = torch.full((out_features, in_features), sigma_init)
self.sigma_weight = nn.Parameter(w)
z = torch.zeros(out_features, in_features)
self.register_buffer("epsilon_weight", z)
if bias:
w = torch.full((out_features,), sigma_init)
self.sigma_bias = nn.Parameter(w)
z = torch.zeros(out_features)
self.register_buffer("epsilon_bias", z)
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(3 / self.in_features)
self.weight.data.uniform_(-std, std)
if self.bias is not None:  # bias is None when the layer is built with bias=False
    self.bias.data.uniform_(-std, std)
def forward(self, x):
self.epsilon_weight.normal_()
bias = self.bias
if bias is not None:
self.epsilon_bias.normal_()
bias = bias + self.sigma_bias * self.epsilon_bias.data
v = self.sigma_weight * self.epsilon_weight.data + self.weight
return f.linear(x, v, bias)
def _forward_unimplemented(self, *input_forward: Any) -> None:
pass
class NoisyDQN(nn.Module):
def __init__(self, input_shape, num_actions):
super(NoisyDQN, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU()
)
conv_out_size = self._get_conv_out(input_shape)
self.noisy_layers = [
NoisyLinear(conv_out_size, 512),
NoisyLinear(512, num_actions)
]
self.fc = nn.Sequential(
self.noisy_layers[0],
nn.ReLU(),
self.noisy_layers[1]
)
def _get_conv_out(self, shape):
o = self.conv(torch.zeros(1, *shape))
return int(np.prod(o.size()))
def forward(self, x):
fx = x.float() / 256
conv_out = self.conv(fx).view(fx.size()[0], -1)
return self.fc(conv_out)
def noisy_layers_sigma_snr(self):
return [
((layer.weight ** 2).mean().sqrt() / (layer.sigma_weight ** 2).mean().sqrt()).item()
for layer in self.noisy_layers
]
def _forward_unimplemented(self, *input_forward: Any) -> None:
pass
class PrioritizedReplayBuffer:
def __init__(self, exp_source, buf_size, prob_alpha=0.6):
self.exp_source_iter = iter(exp_source)
self.prob_alpha = prob_alpha
self.capacity = buf_size
self.pos = 0
self.buffer = []
self.priorities = np.zeros((buf_size,), dtype=np.float32)
self.beta = BETA_START
def update_beta(self, idx):
v = BETA_START + idx * (1.0 - BETA_START) / BETA_FRAMES
self.beta = min(1.0, v)
return self.beta
def __len__(self):
return len(self.buffer)
def populate(self, count):
max_priority = self.priorities.max(initial=1.0) if self.buffer else 1.0
for _ in range(count):
sample = next(self.exp_source_iter)
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.priorities[self.pos] = max_priority
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size):
if len(self.buffer) == self.capacity:
priorities = self.priorities
else:
priorities = self.priorities[:self.pos]
probabilities = priorities ** self.prob_alpha
probabilities /= probabilities.sum()
indices = np.random.choice(len(self.buffer), batch_size, p=probabilities)
samples = [self.buffer[idx] for idx in indices]
total = len(self.buffer)
weights = (total * probabilities[indices]) ** (-self.beta)
weights /= weights.max()
return samples, indices, np.array(weights, dtype=np.float32)
def update_priorities(self, batch_indices, batch_priorities):
for idx, priority in zip(batch_indices, batch_priorities):
self.priorities[idx] = priority
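# --- Illustrative usage sketch (added; not part of the original module). ---
# It exercises the prioritized buffer above with a dummy experience source so the
# priority/beta bookkeeping can be seen in isolation. In real training the samples
# would come from an agent driving NoisyDQN; here the transition format (a bare
# integer), the buffer/batch sizes, and the priority values are demo assumptions.
if __name__ == "__main__":
    dummy_experience = iter(range(10000))           # stand-in for an agent's experience source
    buffer = PrioritizedReplayBuffer(dummy_experience, buf_size=1000)
    buffer.populate(100)                            # pull 100 transitions into the buffer
    samples, indices, weights = buffer.sample(32)   # prioritized sample with importance weights
    buffer.update_priorities(indices, weights + 1e-5)  # stand-in values; real code would use TD errors
    print(len(samples), buffer.update_beta(idx=5000))  # beta annealed towards 1.0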
|
py | 1a383c12a6789d37f4d16a625f5a9ba13f3aaa83 | """
All active unit tests for SciUnit. This module is the default target
for testing in `__main__.py`. Modify this file if you want to add or remove
tests located in other modules.
"""
from .backend_tests import *
from .command_line_tests import *
from .config_tests import *
from .converter_tests import *
from .doc_tests import *
from .error_tests import *
from .import_tests import *
from .observation_tests import *
from .model_tests import *
from .score_tests import *
from .test_tests import *
from .utils_tests import *
|
py | 1a383cde1a2a834cdd77f3b5c41ebd253fe1d907 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-07-12 18:40
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dlp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AnalysisRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_updated', models.DateTimeField(default=django.utils.timezone.now, null=True, verbose_name='Analysis last updated date/time')),
('run_status', models.CharField(choices=[('idle', 'Idle'), ('error', 'Error'), ('running', 'Running'), ('archiving', 'Archiving'), ('complete', 'Complete'), ('align_complete', 'Align Complete'), ('hmmcopy_complete', 'Hmmcopy Complete')], default='idle', max_length=50, verbose_name='Run Status')),
('log_file', models.CharField(blank=True, default=None, max_length=1000, null=True, verbose_name='error_log')),
('sftp_path', models.CharField(blank=True, max_length=50, null=True, verbose_name='sftp path')),
('blob_path', models.CharField(blank=True, max_length=50, null=True, verbose_name='Blob path')),
],
),
migrations.CreateModel(
name='DlpAnalysisInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('analysis_jira_ticket', models.CharField(max_length=50, null=True, verbose_name='Analysis Jira ticket')),
('priority_level', models.CharField(choices=[('L', 'Low'), ('M', 'Medium'), ('H', 'High')], default='L', max_length=50, verbose_name='Priority Level')),
('aligner', models.CharField(choices=[('A', 'bwa-aln'), ('M', 'bwa-mem')], default='A', max_length=50, verbose_name='Aligner')),
('smoothing', models.CharField(choices=[('M', 'modal'), ('L', 'loess')], default='M', max_length=50, verbose_name='Smoothing')),
('analysis_submission_date', models.DateField(default=datetime.date.today, null=True, verbose_name='Analysis submission date')),
('verified', models.CharField(blank=True, choices=[('T', 'True'), ('F', 'False')], default='F', max_length=50, null=True, verbose_name='Verified')),
('analysis_run', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sisyphus.AnalysisRun')),
('lanes', models.ManyToManyField(blank=True, to='dlp.DlpLane')),
('library', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dlp.DlpLibrary', verbose_name='Library')),
],
options={
'ordering': ['pk'],
},
),
migrations.CreateModel(
name='DlpAnalysisVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=50, verbose_name='DlpAnalysis Version')),
],
),
migrations.CreateModel(
name='HistoricalAnalysisRun',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('last_updated', models.DateTimeField(default=django.utils.timezone.now, null=True, verbose_name='Analysis last updated date/time')),
('run_status', models.CharField(choices=[('idle', 'Idle'), ('error', 'Error'), ('running', 'Running'), ('archiving', 'Archiving'), ('complete', 'Complete'), ('align_complete', 'Align Complete'), ('hmmcopy_complete', 'Hmmcopy Complete')], default='idle', max_length=50, verbose_name='Run Status')),
('log_file', models.CharField(blank=True, default=None, max_length=1000, null=True, verbose_name='error_log')),
('sftp_path', models.CharField(blank=True, max_length=50, null=True, verbose_name='sftp path')),
('blob_path', models.CharField(blank=True, max_length=50, null=True, verbose_name='Blob path')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical analysis run',
'db_table': 'analysis_run_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalDlpAnalysisInformation',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('analysis_jira_ticket', models.CharField(max_length=50, null=True, verbose_name='Analysis Jira ticket')),
('priority_level', models.CharField(choices=[('L', 'Low'), ('M', 'Medium'), ('H', 'High')], default='L', max_length=50, verbose_name='Priority Level')),
('aligner', models.CharField(choices=[('A', 'bwa-aln'), ('M', 'bwa-mem')], default='A', max_length=50, verbose_name='Aligner')),
('smoothing', models.CharField(choices=[('M', 'modal'), ('L', 'loess')], default='M', max_length=50, verbose_name='Smoothing')),
('analysis_submission_date', models.DateField(default=datetime.date.today, null=True, verbose_name='Analysis submission date')),
('verified', models.CharField(blank=True, choices=[('T', 'True'), ('F', 'False')], default='F', max_length=50, null=True, verbose_name='Verified')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('analysis_run', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='sisyphus.AnalysisRun')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('library', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='dlp.DlpLibrary', verbose_name='Library')),
],
options={
'verbose_name': 'historical dlp analysis information',
'db_table': 'dlp_analysis_info_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalDlpAnalysisVersion',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('version', models.CharField(max_length=50, verbose_name='DlpAnalysis Version')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical dlp analysis version',
'db_table': 'dlp_history_analysis_version',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalReferenceGenome',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('reference_genome', models.CharField(max_length=50, verbose_name='reference_genome')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical reference genome',
'db_table': 'ref_genome_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='ReferenceGenome',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference_genome', models.CharField(max_length=50, verbose_name='reference_genome')),
],
),
migrations.AddField(
model_name='historicaldlpanalysisinformation',
name='reference_genome',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='sisyphus.ReferenceGenome', verbose_name='ReferenceGenome'),
),
migrations.AddField(
model_name='historicaldlpanalysisinformation',
name='version',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='sisyphus.DlpAnalysisVersion', verbose_name='Analysis Version'),
),
migrations.AddField(
model_name='dlpanalysisinformation',
name='reference_genome',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sisyphus.ReferenceGenome', verbose_name='ReferenceGenome'),
),
migrations.AddField(
model_name='dlpanalysisinformation',
name='sequencings',
field=models.ManyToManyField(to='dlp.DlpSequencing'),
),
migrations.AddField(
model_name='dlpanalysisinformation',
name='version',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sisyphus.DlpAnalysisVersion', verbose_name='Analysis Version'),
),
]
|
py | 1a383f323e727fd9210c0f376073a36b5a6019c3 | """empty message
Revision ID: 914d00d1492a
Revises:
Create Date: 2020-07-01 23:19:51.549022
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '914d00d1492a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=32), nullable=False),
sa.Column('_password', sa.String(length=128), nullable=False),
sa.Column('is_delete', sa.Boolean(), nullable=False),
sa.Column('extension', sa.Integer(), nullable=True),
sa.Column('permission', sa.Integer(), nullable=False),
sa.Column('gender', sa.String(length=2), nullable=True),
sa.Column('is_super', sa.Boolean(), nullable=True),
sa.Column('address', sa.String(length=128), nullable=True),
sa.Column('e_mail', sa.String(length=128), nullable=True),
sa.Column('phone', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
# ### end Alembic commands ###
|
py | 1a38400ac0c03adc74308f2787b138ddf723e6d2 | import logging
import time
class Stage:
def __init__(self, total_tasks, seconds_per_task):
self.current_task_number = 0
self.total_tasks = total_tasks
self.seconds_per_task = seconds_per_task
def update_task_number(self, task_number):
self.current_task_number = task_number
def seconds_remaining(self):
return self.seconds_per_task * (self.total_tasks - self.current_task_number)
class Progress:
def __init__(self, localconfig):
self.time_start = time.time()
self.stages = {}
self.localconfig = localconfig
def add_stage(self, stage_name, num_tasks, seconds_per_task):
self.stages[stage_name] = Stage(num_tasks, seconds_per_task)
def report_message(self, message):
if self.localconfig.job:
self.localconfig.job.set_message(message)
logging.info({"message": message})
def report(self, num, message, stage_name="default"):
if stage_name in self.stages:
stage = self.stages[stage_name]
stage.update_task_number(num)
else:
logging.critical(f"Bad stage={stage_name} in {type(self).__name__}.report()")
return
seconds_left = sum(stage.seconds_remaining() for stage in self.stages.values())
# Write to db
if self.localconfig.job:
estimated_completion_timestamp = int(time.time() + seconds_left)
self.localconfig.job.set_in_progress(message, estimated_completion_timestamp)
else:
logging.info(
"message: %s, seconds_left: %s, time_elapsed: %s", message, seconds_left, time.time() - self.time_start
)
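# --- Illustrative usage sketch (added; not from the original module). ---
# Shows the intended flow: declare stages up front, then report task numbers as
# work progresses. The stand-in config object (no job attached) makes report()
# fall through to plain logging; the stage names and task counts are made up.
if __name__ == "__main__":
    class _LocalConfig:
        job = None  # no job backend, so Progress logs instead of writing to a db

    progress = Progress(_LocalConfig())
    progress.add_stage("download", num_tasks=100, seconds_per_task=0.5)
    progress.add_stage("parse", num_tasks=20, seconds_per_task=2.0)
    progress.report(10, "downloaded 10 of 100 files", stage_name="download")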
|
py | 1a38408798f56597a725aa14518a6197379d5422 | # 用户模型
# 查看用户处于啥状态
from flask_login import UserMixin
# password hashing helpers
from werkzeug.security import generate_password_hash, check_password_hash
from app.extentions import db, login_manager
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), unique=True)
password_hash = db.Column(db.String(150))
email = db.Column(db.String(50), unique=True)
confirmed = db.Column(db.Boolean, default=False)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
# store the password as a hash
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
# verify the password
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
# callback function
# look up the user object by its id
@login_manager.user_loader
def load_user(uid):
return User.query.get(uid)
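# --- Illustrative usage sketch (added; not part of the original model). ---
# Demonstrates the write-only password property and hash verification; the
# username and password values are made up and nothing is committed to the db here.
def _password_demo():
    user = User(username="alice")
    user.password = "s3cret"               # stored only as a salted hash
    assert user.verify_password("s3cret")
    assert not user.verify_password("wrong")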
|
py | 1a3841cb452c11e8dc82c96f4c0595a6f4dab614 | from mystic.data import AbstractDataCollection, db
class DanceSong(db.Model):
__tablename__ = 'dance_song'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), nullable=False)
song_length_millis = db.Column(db.Integer, nullable=False)
song_length = db.Column(db.Integer, nullable=False)
millis_per_bar = db.Column(db.Integer, nullable=False)
class DanceSongCollection(AbstractDataCollection):
__model__ = DanceSong
__indexby__ = 'id'
__filterby__ = 'id'
|
py | 1a38424f5d01b3b25ff2895b919e8d74901918da | import gc
import os
import time
# Import required modules
from pyaedt import Circuit
from pyaedt.generic.filesystem import Scratch
from pyaedt.generic.TouchstoneParser import read_touchstone
# Setup paths for module imports
from _unittest.conftest import local_path, scratch_path, config
try:
import pytest # noqa: F401
except ImportError:
import _unittest_ironpython.conf_unittest as pytest # noqa: F401
original_project_name = "Galileo_t21"
test_project_name = "Galileo_t21"
netlist1 = "netlist_small.cir"
netlist2 = "Schematic1.qcv"
touchstone = "SSN_ssn.s6p"
touchstone2 = "Galileo_V3P3S0.ts"
ami_project = "AMI_Example"
class TestClass:
def setup_class(self):
with Scratch(scratch_path) as self.local_scratch:
time.sleep(2)
example_project = os.path.join(local_path, "example_models", original_project_name + ".aedt")
netlist_file1 = os.path.join(local_path, "example_models", netlist1)
netlist_file2 = os.path.join(local_path, "example_models", netlist2)
touchstone_file = os.path.join(local_path, "example_models", touchstone)
touchstone_file2 = os.path.join(local_path, "example_models", touchstone2)
self.test_project = self.local_scratch.copyfile(
example_project, os.path.join(self.local_scratch.path, test_project_name + ".aedt")
)
self.local_scratch.copyfile(netlist_file1)
self.local_scratch.copyfile(netlist_file2)
self.local_scratch.copyfile(touchstone_file)
self.local_scratch.copyfile(touchstone_file2)
self.local_scratch.copyfolder(
os.path.join(local_path, "example_models", original_project_name + ".aedb"),
os.path.join(self.local_scratch.path, test_project_name + ".aedb"),
)
ami_example_project = os.path.join(local_path, "example_models", ami_project + ".aedt")
self.ami_example_project = self.local_scratch.copyfile(ami_example_project)
self.local_scratch.copyfolder(
os.path.join(local_path, "example_models", ami_project + ".aedb"),
os.path.join(self.local_scratch.path, ami_project + ".aedb"),
)
self.aedtapp = Circuit(self.test_project)
def teardown_class(self):
self.aedtapp._desktop.ClearMessages("", "", 3)
for proj in self.aedtapp.project_list:
try:
self.aedtapp.close_project(proj, saveproject=False)
except:
pass
self.local_scratch.remove()
gc.collect()
def test_01_create_inductor(self):
myind = self.aedtapp.modeler.schematic.create_inductor(value=1e-9, location=[0.2, 0.2])
assert type(myind.id) is int
assert myind.parameters["L"] == "1e-09"
def test_02_create_resistor(self):
myres = self.aedtapp.modeler.schematic.create_resistor(value=50, location=[0.4, 0.2])
assert type(myres.id) is int
assert myres.parameters["R"] == "50"
def test_03_create_capacitor(self):
mycap = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12, location=[0.6, 0.2])
assert type(mycap.id) is int
assert mycap.parameters["C"] == "1e-12"
def test_04_getpin_names(self):
mycap2 = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12)
pinnames = self.aedtapp.modeler.schematic.get_pins(mycap2)
pinnames2 = self.aedtapp.modeler.schematic.get_pins(mycap2.id)
pinnames3 = self.aedtapp.modeler.schematic.get_pins(mycap2.composed_name)
assert pinnames2 == pinnames3
assert type(pinnames) is list
assert len(pinnames) == 2
def test_05_getpin_location(self):
for el in self.aedtapp.modeler.schematic.components:
pinnames = self.aedtapp.modeler.schematic.get_pins(el)
for pinname in pinnames:
pinlocation = self.aedtapp.modeler.schematic.get_pin_location(el, pinname)
assert len(pinlocation) == 2
def test_06_add_3dlayout_component(self):
myedb = self.aedtapp.modeler.schematic.add_subcircuit_3dlayout("Galileo_G87173_204")
assert type(myedb.id) is int
def test_07_add_hfss_component(self):
my_model, myname = self.aedtapp.modeler.schematic.create_field_model(
"uUSB", "Setup1 : Sweep", ["usb_N_conn", "usb_N_pcb", "usb_P_conn", "usb_P_pcb"]
)
assert type(my_model) is int
def test_07a_push_excitation(self):
setup_name = "LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=False)
assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=True)
def test_08_import_mentor_netlist(self):
self.aedtapp.insert_design("MentorSchematicImport")
assert self.aedtapp.create_schematic_from_mentor_netlist(os.path.join(self.local_scratch.path, netlist2))
pass
def test_09_import_netlist(self):
self.aedtapp.insert_design("SchematicImport")
assert self.aedtapp.create_schematic_from_netlist(os.path.join(self.local_scratch.path, netlist1))
def test_10_import_touchstone(self):
self.aedtapp.insert_design("Touchstone_import")
ports = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone))
ports2 = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone2))
numports = len(ports)
assert numports == 6
numports2 = len(ports2)
assert numports2 == 3
tx = ports[: int(numports / 2)]
rx = ports[int(numports / 2) :]
insertions = ["dB(S({},{}))".format(i, j) for i, j in zip(tx, rx)]
assert self.aedtapp.create_touchstone_report("Insertion Losses", insertions)
touchstone_data = self.aedtapp.get_touchstone_data(insertions)
assert touchstone_data
def test_11_export_fullwave(self):
output = self.aedtapp.export_fullwave_spice(
os.path.join(self.local_scratch.path, touchstone), is_solution_file=True
)
assert output
def test_12_connect_components(self):
myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
myres = self.aedtapp.modeler.schematic.create_resistor("R100", 50)
mycap = self.aedtapp.modeler.schematic.create_capacitor("C100", 1e-12)
portname = self.aedtapp.modeler.schematic.create_interface_port("Port1")
assert "Port1" in portname.name
assert self.aedtapp.modeler.connect_schematic_components(myind.id, myind.id, pinnum_second=2)
assert self.aedtapp.modeler.connect_schematic_components(myres.id, mycap.id, pinnum_first=1)
# create_interface_port
L1_pins = myind.pins
L1_pin2location = {}
for pin in L1_pins:
L1_pin2location[pin.name] = pin.location
C1_pins = mycap.pins
C1_pin2location = {}
for pin in C1_pins:
C1_pin2location[pin.name] = pin.location
portname = self.aedtapp.modeler.schematic.create_interface_port(
"P1_1", [L1_pin2location["n1"][0], L1_pin2location["n1"][1]]
)
assert "P1_1" in portname.name
portname = self.aedtapp.modeler.schematic.create_interface_port(
"P2_2", [C1_pin2location["negative"][0], C1_pin2location["negative"][1]]
)
assert "P2_2" in portname.name
# create_page_port
portname = self.aedtapp.modeler.schematic.create_page_port(
"Link_1", [L1_pin2location["n2"][0], L1_pin2location["n2"][1]]
)
assert "Link_1" in portname.name
portname = self.aedtapp.modeler.schematic.create_page_port(
"Link_2", [C1_pin2location["positive"][0], C1_pin2location["positive"][1]], 180
)
assert "Link_2" in portname.name
def test_13_properties(self):
assert self.aedtapp.modeler.model_units
def test_14_move(self):
assert self.aedtapp.modeler.move("L100", [0.00508, 0.00508])
assert self.aedtapp.modeler.move("L100", [200, 200], "mil")
def test_15_rotate(self):
assert self.aedtapp.modeler.rotate("L100")
def test_16_read_touchstone(self):
data = read_touchstone(os.path.join(self.local_scratch.path, touchstone))
assert len(data.expressions) > 0
assert data.data_real()
assert data.data_imag()
assert data.data_db()
def test_17_create_setup(self):
setup_name = "Dom_LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
LNA_setup.SweepDefinition = [
("Variable", "Freq"),
("Data", "LIN 1GHz 5GHz 1001"),
("OffsetF1", False),
("Synchronize", 0),
]
assert LNA_setup.update()
@pytest.mark.skipif(os.name == "posix", reason="To be investigated on linux.")
def test_18_export_touchstone(self):
assert self.aedtapp.analyze_nominal()
time.sleep(30)
assert self.aedtapp.export_touchstone("Dom_LNA", "Dom_LNA", os.path.join(self.local_scratch.path, "new.s2p"))
def test_19A_create_sweeps(self):
setup_name = "Sweep_LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
LNA_setup.add_sweep_step("Freq", 1, 2, 0.01, "GHz", override_existing_sweep=True)
assert LNA_setup.props["SweepDefinition"]["Data"] == "LIN 1GHz 2GHz 0.01GHz"
LNA_setup.add_sweep_points("Freq", [11, 12, 13.4], "GHz", override_existing_sweep=False)
assert "13.4GHz" in LNA_setup.props["SweepDefinition"]["Data"]
assert "LIN 1GHz 2GHz 0.01GHz" in LNA_setup.props["SweepDefinition"]["Data"]
LNA_setup.add_sweep_count("Temp", 20, 100, 81, "cel", count_type="Decade", override_existing_sweep=True)
assert isinstance(LNA_setup.props["SweepDefinition"], list)
assert LNA_setup.props["SweepDefinition"][1]["Variable"] == "Temp"
assert LNA_setup.props["SweepDefinition"][1]["Data"] == "DEC 20cel 100cel 81"
def test_19B_create_EyE_setups(self):
setup_name = "Dom_Verify"
assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
setup_name = "Dom_Quick"
assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
setup_name = "Dom_AMI"
assert self.aedtapp.create_setup(setup_name, "NexximAMI")
def test_20_create_AMI_plots(self):
self.aedtapp.load_project(self.ami_example_project, close_active_proj=True)
report_name = "MyReport"
assert (
self.aedtapp.post.create_ami_initial_response_plot(
"AMIAnalysis",
"b_input_15",
self.aedtapp.available_variations.nominal,
plot_type="Rectangular Stacked Plot",
plot_final_response=True,
plot_intermediate_response=True,
plotname=report_name,
)
== report_name
)
setup_name = "Dom_Verify"
assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
setup_name = "Dom_Quick"
assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
assert (
self.aedtapp.post.create_ami_statistical_eye_plot(
"AMIAnalysis", "b_output4_14", self.aedtapp.available_variations.nominal, plotname="MyReport1"
)
== "MyReport1"
)
assert (
self.aedtapp.post.create_statistical_eye_plot(
"Dom_Quick",
"b_input_15.int_ami_rx.eye_probe",
self.aedtapp.available_variations.nominal,
plotname="MyReportQ",
)
== "MyReportQ"
)
@pytest.mark.skipif(config["desktopVersion"] > "2021.2", reason="Skipped on versions higher than 2021.2")
def test_20B_create_AMI_plots(self):
assert (
self.aedtapp.post.create_statistical_eye_plot(
"Dom_Verify",
"b_input_15.int_ami_rx.eye_probe",
self.aedtapp.available_variations.nominal,
plotname="MyReportV",
)
== "MyReportV"
)
def test_21_assign_voltage_sinusoidal_excitation_to_ports(self):
settings = ["123 V", "10deg", "", "", "0V", "15GHz", "0s", "0", "0deg", ""]
ports_list = ["P1_1", "P2_2"]
assert self.aedtapp.assign_voltage_sinusoidal_excitation_to_ports(ports_list, settings)
def test_22_assign_current_sinusoidal_excitation_to_ports(self):
settings = ["", "", "20A", "50A", "4A", "", "0s", "0", "0deg", "1", "20Hz"]
ports_list = ["P1_1"]
assert self.aedtapp.assign_current_sinusoidal_excitation_to_ports(ports_list, settings)
def test_23_assign_power_sinusoidal_excitation_to_ports(self):
settings = ["", "", "", "", "20W", "14GHz", "0s", "0", "0deg", "0Hz"]
ports_list = ["P2_2"]
assert self.aedtapp.assign_power_sinusoidal_excitation_to_ports(ports_list, settings)
def test_24_new_connect_components(self):
self.aedtapp.insert_design("Components")
myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
myres = self.aedtapp.modeler.components.create_resistor("R100", 50)
mycap = self.aedtapp.modeler.components.create_capacitor("C100", 1e-12)
myind2 = self.aedtapp.modeler.components.create_inductor("L101", 1e-9)
port = self.aedtapp.modeler.components.create_interface_port("Port1")
assert self.aedtapp.modeler.schematic.connect_components_in_series([myind, myres.composed_name])
assert self.aedtapp.modeler.schematic.connect_components_in_parallel([mycap, port, myind2.id])
def test_25_import_model(self):
self.aedtapp.insert_design("Touch_import")
touch = os.path.join(local_path, "example_models", "SSN_ssn.s6p")
t1 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
assert t1
assert len(t1.pins) == 6
t2 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
assert t2
|
py | 1a3843966576f008ce570054cbe445f75911c853 | fruits = ['apple', 'pear', 'strawberry']
fruits.append('peach')
fruits[1] = 'blueberry'
for fruit in fruits:
print(fruit)
# Tuples
position = (2, 3)
color = (255, 255, 255)
print(type(color))
|
py | 1a3843bdf1c480adda57a8ecccc96f1648635e25 | #!/usr/bin/env python
"""
Queries currently running in a SQL Server database (ODBC)
"""
import sys
import lib_common
from lib_properties import pc
from sources_types.odbc import dsn as survol_odbc_dsn
from sources_types.sqlserver import dsn as survol_sqlserver_dsn
from sources_types.sqlserver import session
from sources_types.sqlserver import query as sql_query
try:
import pyodbc
except ImportError:
lib_common.ErrorMessageHtml("pyodbc Python library not installed")
def Main():
cgiEnv = lib_common.CgiEnv()
grph = cgiEnv.GetGraph()
dsnNam = survol_odbc_dsn.GetDsnNameFromCgi(cgiEnv)
DEBUG("dsn=(%s)", dsnNam)
nodeDsn = survol_sqlserver_dsn.MakeUri(dsnNam)
ODBC_ConnectString = survol_odbc_dsn.MakeOdbcConnectionString(dsnNam)
try:
cnxn = pyodbc.connect(ODBC_ConnectString)
DEBUG("Connected: %s", dsnNam)
cursorQueries = cnxn.cursor()
qryQueries = """
SELECT sqltext.TEXT,
req.session_id,
req.status,
sess.host_process_id,
sess.host_name
FROM sys.dm_exec_requests req
CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS sqltext
, sys.dm_exec_sessions sess
where sess.session_id = req.session_id
"""
propSqlServerSqlQuery = lib_common.MakeProp("Sql query")
propSqlServerHostProcess = lib_common.MakeProp("Host process")
propSqlServerStatus = lib_common.MakeProp("Status")
for rowQry in cursorQueries.execute(qryQueries):
DEBUG("rowQry.session_id=(%s)", rowQry.session_id)
nodeSession = session.MakeUri(dsnNam, rowQry.session_id)
# A bit of cleanup.
queryClean = rowQry.TEXT.replace("\n", " ").strip()
# TODO: Must add connection information so we can go from the tables to sqlserver itself.
nodeSqlQuery = sql_query.MakeUri(queryClean,dsnNam)
grph.add((nodeSession, propSqlServerSqlQuery, nodeSqlQuery))
node_process = lib_common.RemoteBox(rowQry.host_name).PidUri(rowQry.host_process_id)
grph.add((node_process, pc.property_pid, lib_common.NodeLiteral(rowQry.host_process_id)))
grph.add((nodeSession, propSqlServerHostProcess, node_process))
grph.add((nodeSession, propSqlServerStatus, lib_common.NodeLiteral(rowQry.status)))
except Exception:
exc = sys.exc_info()[0]
lib_common.ErrorMessageHtml(
"nodeDsn=%s Unexpected error:%s" % (dsnNam, str(sys.exc_info())))
cgiEnv.OutCgiRdf()
if __name__ == '__main__':
Main()
# http://www.easysoft.com/developer/languages/python/pyodbc.html
|
py | 1a384443f5fdaa23c3308b6f96fab4b51ad4d4f3 | import random, sys
from Person import Person
from Virus import Virus
from FileWriter import FileWriter
class Simulation:
def __init__(self, initial_vaccinated, initial_infected, initial_healthy, virus, resultsfilename):
'''Set up the initial simulation values'''
self.virus = virus
self.initial_infected = initial_infected
self.initial_healthy = initial_healthy
self.initial_vaccinated = initial_vaccinated
self.population = []
self.population_size = initial_infected + initial_healthy + initial_vaccinated
self.total_dead = 0
self.total_vaccinated = initial_vaccinated
self.file_writer = FileWriter(resultsfilename)
def create_population(self):
'''Creates the population (a list of Person objects) consisting of
initial infected people, initial healthy non-vaccinated people, and
initial healthy vaccinated people. Adds them to the population list'''
for i in range(self.initial_infected):
person = Person(False, self.virus)
self.population.append(person)
for i in range(self.initial_healthy):
person = Person(False, None)
self.population.append(person)
for i in range(self.initial_vaccinated):
person = Person(True, None)
self.population.append(person)
def print_population(self):
'''Prints out every person in the population and their current attributes'''
#TODO: finish this method
def get_infected(self):
'''Gets all the infected people from the population and returns them as a list'''
#TODO: finish this method
def simulation_should_continue(self):
'''Determines whether the simulation should continue.
If everyone in the population is dead then return False, the simulation
should not continue. If everyone in the population is vaccinated return False.
If there are no more infected people left and everyone is either vaccinated
or dead return False. In all other cases return True'''
#TODO: finish this method
def run(self):
''' This method should run the simulation until all requirements for ending
the simulation are met.
'''
self.create_population()
random.shuffle(self.population)
self.print_population()
time_step_counter = 0
should_continue = True
self.file_writer.init_file(self.virus, self.population_size, self.initial_vaccinated, self.initial_healthy, self.initial_infected)
#keep looping until the simulation ends
while self.simulation_should_continue():
#save the current infected
old_infected = self.get_infected()
self.time_step(old_infected)
#time step will create newly infected people, just determine the survival of the previously infected people
self.determine_survival(old_infected)
time_step_counter += 1
print(f'The simulation has ended after {time_step_counter} turns.')
self.file_writer.write_results(time_step_counter, self.total_dead, self.total_vaccinated)
def determine_survival(self, infected):
'''Check if the current infected people survive their infection
Call the did_survive_infection() method. If it returns false then the person
is no longer alive, does not have an infection, and one is added to total dead.
If it returns true then the person no longer has an infection and is vaccinated;
one is added to total vaccinated'''
#TODO: finish this method
def time_step(self, infected):
''' For every infected person interact with a random person from the
population 10 times'''
for infected_person in infected:
for i in range(10):
#TODO: get a random index for the population list
#TODO: using the random index get a random person from the population
#TODO: call interaction() with the current infected person and the random person
pass
def interaction(self, infected, random_person):
'''If the infected person is the same object as the random_person return
and do nothing
if the random person is not alive return and do nothing
if the random person is vaccinated return and do nothing
if the random person is not vaccinated:
generate a random float between 0 and 1
if the random float is less than the infected person's virus reproduction
number then the random person is infected,
otherwise the random person is vaccinated and one is added to the total
vaccinated'''
#TODO: finish this method
if __name__ == "__main__":
#Set up the initial simulations values
virus_name = "Malaise"
reproduction_num = 0.20
mortality_num = .99
initial_healthy = 10
initial_vaccinated = 5
initial_infected = 1
virus = Virus(virus_name, reproduction_num, mortality_num)
simulation = Simulation(initial_vaccinated, initial_infected, initial_healthy, virus, "results.txt")
#run the simulation
simulation.run()
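# --- Hedged sketch (added; not part of the original assignment skeleton). ---
# One possible shape for the unfinished termination check, written as a free
# function so the TODO methods above stay untouched. The Person attribute names
# used here (`is_alive`, `is_vaccinated`, `infection`) are assumptions, not
# something this file confirms.
def _should_continue_sketch(population):
    alive = [p for p in population if p.is_alive]
    if not alive:
        return False                      # everyone is dead
    if all(p.is_vaccinated for p in alive):
        return False                      # everyone left is vaccinated
    # continue only while somebody is still carrying an infection
    return any(p.infection is not None for p in alive)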
|
py | 1a3844e296cb67572875a7ac0df6234f2d272120 | import sys
import json
import time
from itertools import combinations
import requests
from ratelimit import limits, sleep_and_retry
def loop_possibilities(upper_bound, n_combinations=3):
return combinations(range(1, upper_bound + 1), n_combinations)
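# e.g. loop_possibilities(4) yields (1, 2, 3), (1, 2, 4), (1, 3, 4) and (2, 3, 4):
# every 3-way combination of the area ids 1..upper_bound.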
@sleep_and_retry
@limits(calls=5, period=30)  # at most 5 calls every 30 seconds
def _http_request(url, method='GET', params={}, json_data={}):
''' Sends a rate-limited HTTP request to the given url
input(s): any url
output: the requests Response object
'''
prep = requests.Request(method, url).prepare()
if json_data:
prep.prepare_body(data=json_data, files=None, json=True)
return requests.Session().send(prep)
def main():
# fetch the endpoint that returns the list of possibilities from the form
# that precedes the challenges endpoint
list_possibilities = loop_possibilities(27, 3)
list_data = []
for combination in list_possibilities:
url = 'https://www.brfhub.com/backend/public/api/get-challenges?language=pt&profile=startup&areas%5B%5D={}&areas%5B%5D={}&areas%5B%5D={}'.format(combination[0], combination[1], combination[2])
r = _http_request(url, 'GET')
if r.status_code != 200:
continue
dict_data = r.json()
list_data.append(dict_data['data'])  # review later
return list_data
if __name__ == '__main__':
print(main())
|
py | 1a384595c3b649ddee9042c9d8a75bad0bc2b316 | # https://github.com/pokurt/Nana-Remix/blob/5ec27fcc124e7438b2816731c07ea4a129dc9a4d/nana/utils/aiohttp_helper.py#L4
# ported from nana remix
import aiohttp
class AioHttp:
@staticmethod
async def get_json(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return await resp.json()
@staticmethod
async def get_text(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return await resp.text()
@staticmethod
async def get_raw(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return await resp.read()
@staticmethod
async def get_status(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return resp.status
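# --- Illustrative usage sketch (added; not part of the original helper). ---
# The static methods are coroutines, so they are driven with asyncio; the URL
# below is just an example endpoint and the demo performs a real request.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        status = await AioHttp.get_status("https://example.org")
        text = await AioHttp.get_text("https://example.org")
        print(status, len(text))

    asyncio.run(_demo())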
|
py | 1a3846e050bda6cbfcc248973c4e2713f818eecb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class CheckError(Exception):
pass
class AlertError(Exception):
pass
class NotificationError(Exception):
pass
class SecurityError(Exception):
pass
class ConfigurationError(CheckError):
def __init__(self, message):
message = 'Configuration error: {}'.format(message)
super(ConfigurationError, self).__init__(message)
class InsufficientPermissionsError(CheckError):
def __init__(self, user, entity):
self.user = user
self.entity = entity
def __str__(self):
return 'Insufficient permissions for user {} to access {}'.format(self.user, self.entity)
class JmxQueryError(CheckError):
def __init__(self, message):
self.message = message
super(JmxQueryError, self).__init__()
def __str__(self):
return 'JMX Query failed: {}'.format(self.message)
class HttpError(CheckError):
def __init__(self, message, url=None):
self.message = message
self.url = url
super(HttpError, self).__init__()
def __str__(self):
return 'HTTP request failed for {}: {}'.format(self.url, self.message)
class DbError(CheckError):
def __init__(self, message, operation=None):
self.message = message
self.operation = operation
super(DbError, self).__init__()
def __str__(self):
return 'DB operation {} failed: {}'.format(self.operation, self.message)
class ResultSizeError(CheckError):
def __init__(self, message):
message = 'Result size error: {}'.format(message)
super(ResultSizeError, self).__init__(message)
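# --- Illustrative usage (added; not part of the original module). ---
# The specialised errors carry context and render it through __str__, and they
# can all be caught via the CheckError base class; the URL below is made up.
if __name__ == "__main__":
    try:
        raise HttpError("connection timed out", url="https://example.org/health")
    except CheckError as error:
        print(error)  # HTTP request failed for https://example.org/health: connection timed out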
|
py | 1a3847b45cd3a1938b6f08d4ec64af1ecec5a73a | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from pkg_resources import get_distribution
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Do not generate APIdocs for members missing docstrings (undoc-members)
os.environ['APIDOC_OPTIONS'] = 'members,show-inheritance,inherited-members'
# Set APIDOC options
#os.environ['SPHINX_APIDOC_OPTIONS'] = 'members,undoc-members,show-inheritance,special-members'
os.environ['SPHINX_APIDOC_OPTIONS'] = 'members'
# -- Project information -----------------------------------------------------
project = 'VerMoUTH'
copyright = '2018, University of Groningen'
author = 'Peter C Kroon, Jonathan Barnoud, Tsjerk A Wassenaar, Siewert-Jan Marrink'
# The full version, including alpha/beta/rc tags
release = get_distribution('vermouth').version
# The short X.Y version
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxcontrib.apidoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
nitpick_ignore = [
('py:class', 'networkx.algorithms.isomorphism.vf2userfunc.GraphMatcher'),
('py:class', 'networkx.algorithms.isomorphism.isomorphvf2.GraphMatcher'),
('py:class', 'networkx.classes.graph.Graph'),
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'VerMoUTHdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VerMoUTH.tex', 'VerMoUTH Documentation',
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vermouth', 'VerMoUTH Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VerMoUTH', 'VerMoUTH Documentation',
author, 'VerMoUTH', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
apidoc_module_dir = '../../vermouth'
apidoc_output_dir = 'api'
apidoc_separate_modules = True
apidoc_excluded_paths = ['tests', 'redistributed']
autoclass_content = 'both'
autodoc_default_options = {'members': None,
'undoc-members': None,
'show-inheritance': None}
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'networkx': ('https://networkx.github.io/documentation/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
}
|
py | 1a3847bfe68f0de0e09d3f78393dca0b2f0456ed | # Copyright 2018 Lenovo (Beijing) Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as object_base
from cyborg.objects import base
from cyborg.objects.control_path import ControlpathID
from cyborg.objects.device import Device
from cyborg.objects.driver_objects.driver_controlpath_id import \
DriverControlPathID
from cyborg.objects.driver_objects.driver_deployable import DriverDeployable
from cyborg.objects import fields as object_fields
@base.CyborgObjectRegistry.register
class DriverDevice(base.DriverObjectBase,
object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vendor': object_fields.StringField(nullable=False),
'model': object_fields.StringField(nullable=False),
'type': object_fields.DeviceTypeField(nullable=False),
'std_board_info': object_fields.StringField(nullable=True),
# vendor board info should be a dict for driver-specific resource
# provider.
'vendor_board_info': object_fields.StringField(nullable=True),
# hostname will be set by the agent, so driver don't need to report.
# Each controlpath_id corresponds to a different PF. For now
# we are sticking with a single cpid.
'controlpath_id': object_fields.ObjectField('DriverControlPathID',
nullable=False),
'deployable_list': object_fields.ListOfObjectsField('DriverDeployable',
default=[],
nullable=False),
'stub': object_fields.BooleanField(nullable=False, default=False)
}
def create(self, context, host):
"""Create a driver-side Device Object into DB. This object will be
stored in many db tables: device, deployable, attach_handle,
controlpath_id etc. by calling related Object.
"""
# first store in device table through Device Object.
device_obj = Device(context=context,
type=self.type,
vendor=self.vendor,
model=self.model,
hostname=host
)
if hasattr(self, 'std_board_info'):
device_obj.std_board_info = self.std_board_info
if hasattr(self, 'vendor_board_info'):
device_obj.vendor_board_info = self.vendor_board_info
device_obj.create(context)
# for the controlpath_id, call driver_controlpath_id to create.
cpid_obj = self.controlpath_id.create(context, device_obj.id)
# for deployable_list, call internal layer object: driver_deployable
# to create.
for driver_deployable in self.deployable_list:
driver_deployable.create(context, device_obj.id, cpid_obj.id)
def destroy(self, context, host):
"""Delete a driver-side Device Object from db. This should
delete the internal layer objects.
"""
# get dev_obj_list from hostname
device_obj = self.get_device_obj(context, host)
# delete deployable_list first.
for driver_deployable in self.deployable_list:
driver_deployable.destroy(context, device_obj.id)
if hasattr(self.controlpath_id, 'cpid_info'):
cpid_obj = ControlpathID.get_by_device_id_cpidinfo(
context, device_obj.id, self.controlpath_id.cpid_info)
# delete controlpath_id
cpid_obj.destroy(context)
# delete the device
device_obj.destroy(context)
def get_device_obj(self, context, host):
"""Get a driver-side Device Object from db.
:param context: requested context.
:param host: hostname of the node.
:return: a device object matching the current driver device object. It will
return one value because it has controlpath_id.
"""
# get dev_obj_list from hostname
device_obj_list = Device.get_list_by_hostname(context, host)
# use controlpath_id.cpid_info to identify one Device.
for device_obj in device_obj_list:
# get cpid_obj, could be empty or only one value.
cpid_obj = ControlpathID.get_by_device_id_cpidinfo(
context, device_obj.id, self.controlpath_id.cpid_info)
# find the one cpid_obj with cpid_info
if cpid_obj is not None:
return device_obj
@classmethod
def list(cls, context, host):
"""Form driver-side device object list from DB for one host.
A list may contains driver_device_object without controlpath_id.(In
the case some of controlpath_id can't store successfully but its
devices stores successfully.)
"""
# get dev_obj_list from hostname
dev_obj_list = Device.get_list_by_hostname(context, host)
driver_dev_obj_list = []
for dev_obj in dev_obj_list:
cpid = DriverControlPathID.get(context, dev_obj.id)
# NOTE: will not return device without controlpath_id.
if cpid is not None:
driver_dev_obj = \
cls(context=context, vendor=dev_obj.vendor,
model=dev_obj.model, type=dev_obj.type,
std_board_info=dev_obj.std_board_info,
vendor_board_info=dev_obj.vendor_board_info,
controlpath_id=cpid,
deployable_list=DriverDeployable.list(context,
dev_obj.id)
)
driver_dev_obj_list.append(driver_dev_obj)
return driver_dev_obj_list
def get_device_obj_by_device_id(self, context, device_id):
"""Get device object by device id.
:param context: requested context.
:param device_id: id of the device.
:return: a device object matching the current driver device object. It will
return one value because it has controlpath_id.
"""
# get dev_obj_list from hostname
device_obj = Device.get_by_device_id(context, device_id)
# use controlpath_id.cpid_info to identify one Device.
# get cpid_obj, could be empty or only one value.
ControlpathID.get_by_device_id_cpidinfo(
context, device_obj.id, self.controlpath_id.cpid_info)
# find the one cpid_obj with cpid_info
return device_obj
|
py | 1a3848f58d4de81983b594281fa59d15f0fc63ac | from django.utils.html import escape
from core.templatetags import register
from django.utils.safestring import mark_safe
@register.simple_tag(takes_context=True)
def text_element(
context,
id,
label,
errors=None,
data_mode=None,
name=None,
textarea=None,
value=None,
hint=None,
password=False,
readonly=False,
autocomplete=None,
):
"""
Render a labelled text input (or textarea), including any error message and hint
"""
output = []
type = "password" if password else "text"
readonly = "readonly" if readonly else ""
if autocomplete:
autocomplete = f'autocomplete="{autocomplete}" '
else:
autocomplete = ""
if value is None:
value = context.get(id, "")
else:
value = escape(value)
output.append('<div class="form-group type-text ')
if id and errors and id in errors:
output.append("form-group-error ")
output.append('"')
if data_mode:
output.append(' data-attach="Typeahead"')
output.append(">")
output.append(f'<label class="form-label" for="{ id }">{ label }')
if hint:
output.append(f'<span class="form-hint">{ hint }</span>')
output.append("</label>")
name = name or id
if name and errors and name in errors:
message = errors[name]
output.append(f'<span class="error-message" id="{ name }_error">{ message }</span>')
if data_mode: # for typeahead elements
output.append(
f'<input { autocomplete } class="form-control" id="{ id }" type="text" data-mode="{ data_mode }" name="{ name }" { readonly } value="{ value }">' # noqa: E501
)
elif textarea:
output.append(
f'<textarea class="form-control" id="{ id }" name="{ name }" { readonly }>{ value }</textarea>' # noqa: E501
)
else:
output.append(
f'<input { autocomplete }class="form-control" id="{ id }" type="{ type }" name="{ name }" value="{ value }" { readonly }>' # noqa: E501
)
output.append("</div>")
return mark_safe("".join(output))
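# --- Illustrative call (added; not from the original tag library). ---
# Django's simple_tag decorator registers the function and typically hands the
# original back, so text_element can be exercised directly with a plain dict
# standing in for the template context; the field id, label, and hint below are
# made-up values for demonstration only.
def _text_element_demo():
    return text_element({}, id="postcode", label="Postcode", hint="e.g. SW1A 1AA")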
|
py | 1a3848f6c128cb1fa757f61fa3bca22e1fc3d975 | from typing import List
class Grid:
def __init__(self):
self.grid_mat = Grid.__initialise_grid_mat()
def visualise(self) -> None:
grid_mat_element_gen = self.__create_grid_mat_element_generator()
str_rows = list()
for ri in range(7):
if ri % 2 == 0:
str_rows.append('+---+---+---+')
continue
str_row = ''
for ci in range(13):
if ci % 4 == 0:
str_row += '|'
elif ci % 2 != 0:
str_row += ' '
else:
str_row += next(grid_mat_element_gen)
str_rows.append(str_row)
print('\n'.join(str_rows))
def visualise_v2(self):
str_rows = [
' {} | {} | {} '.format(*self.grid_mat[0][:]),
'----+---+-----',
' {} | {} | {} '.format(*self.grid_mat[1][:]),
'----+---+-----',
' {} | {} | {} '.format(*self.grid_mat[2][:])
]
print('\n'.join(str_rows))
def __create_grid_mat_element_generator(self):
for ri in range(3):
for ci in range(3):
yield self.grid_mat[ri][ci]
@staticmethod
def __initialise_grid_mat() -> List[List[str]]:
grid = [[' ' for _ in range(3)] for _ in range(3)]
return grid
print('Visualise grid:')
grid = Grid()
print('Step 1')
grid.grid_mat[1][1] = 'X'
grid.visualise()
print()
print('Step 2')
grid.grid_mat[0][2] = 'O'
grid.visualise()
print()
print('New visualisation method:')
grid = Grid()
print('Step 1')
grid.grid_mat[1][1] = 'X'
grid.visualise_v2()
print()
print('Step 2')
grid.grid_mat[0][2] = 'O'
grid.visualise_v2()
print()
|
py | 1a3849f822490a01d688fa435598c79e3912fdd7 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from cinder.api.v2 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder.openstack.common import timeutils
from cinder import test
from cinder.tests.api import fakes
from cinder.volume import volume_types
def stub_volume_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"
}
return dict(
id=id,
name='vol_type_%s' % str(id),
extra_specs=specs,
)
def return_volume_types_get_all_types(context):
return dict(
vol_type_1=stub_volume_type(1),
vol_type_2=stub_volume_type(2),
vol_type_3=stub_volume_type(3)
)
def return_empty_volume_types_get_all_types(context):
return {}
def return_volume_types_get_volume_type(context, id):
if id == "777":
raise exception.VolumeTypeNotFound(volume_type_id=id)
return stub_volume_type(int(id))
def return_volume_types_get_by_name(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return stub_volume_type(int(name.split("_")[2]))
class VolumeTypesApiTest(test.TestCase):
def setUp(self):
super(VolumeTypesApiTest, self).setUp()
self.controller = types.VolumeTypesController()
def test_volume_types_index(self):
self.stubs.Set(volume_types, 'get_all_types',
return_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['volume_types']))
expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
actual_names = map(lambda e: e['name'], res_dict['volume_types'])
self.assertEqual(set(actual_names), set(expected_names))
for entry in res_dict['volume_types']:
self.assertEqual('value1', entry['extra_specs']['key1'])
def test_volume_types_index_no_data(self):
self.stubs.Set(volume_types, 'get_all_types',
return_empty_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['volume_types']))
def test_volume_types_show(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(1, len(res_dict))
self.assertEqual('1', res_dict['volume_type']['id'])
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
def test_volume_types_show_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, '777')
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_volume_type = dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertTrue('volume_type' in output)
expected_volume_type = dict(
name='new_type',
extra_specs={},
id=42,
)
self.assertDictMatch(output['volume_type'], expected_volume_type)
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_volume_types = []
for i in range(0, 10):
raw_volume_types.append(
dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.index(request, raw_volume_types)
self.assertTrue('volume_types' in output)
for i in range(0, 10):
expected_volume_type = dict(
name='new_type',
extra_specs={},
id=42 + i
)
self.assertDictMatch(output['volume_types'][i],
expected_volume_type)
class VolumeTypesSerializerTest(test.TestCase):
def _verify_volume_type(self, vtype, tree):
self.assertEqual('volume_type', tree.tag)
self.assertEqual(vtype['name'], tree.get('name'))
self.assertEqual(str(vtype['id']), tree.get('id'))
self.assertEqual(1, len(tree))
extra_specs = tree[0]
self.assertEqual('extra_specs', extra_specs.tag)
seen = set(vtype['extra_specs'].keys())
for child in extra_specs:
self.assertTrue(child.tag in seen)
self.assertEqual(vtype['extra_specs'][child.tag], child.text)
seen.remove(child.tag)
self.assertEqual(len(seen), 0)
def test_index_serializer(self):
serializer = types.VolumeTypesTemplate()
# Just getting some input data
vtypes = return_volume_types_get_all_types(None)
text = serializer.serialize({'volume_types': vtypes.values()})
tree = etree.fromstring(text)
self.assertEqual('volume_types', tree.tag)
self.assertEqual(len(vtypes), len(tree))
for child in tree:
name = child.get('name')
self.assertTrue(name in vtypes)
self._verify_volume_type(vtypes[name], child)
def test_voltype_serializer(self):
serializer = types.VolumeTypeTemplate()
vtype = stub_volume_type(1)
text = serializer.serialize(dict(volume_type=vtype))
tree = etree.fromstring(text)
self._verify_volume_type(vtype, tree)
|
py | 1a384d4444f55149043470cf20f4c46cc13eaca1 | def main():
n = int(input())
for i in range(n):
a = int(input())
b = set([int(x) for x in input().split()])
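        # Answer is YES iff the distinct values are consecutive integers, i.e. max(b) - min(b) <= len(b) - 1.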
if (max(b) - min(b)) < len(b):
print("YES")
else:
print("NO")
if __name__ == '__main__':
main()
|
py | 1a384d91ec52a81546279b8b1eed1037cc0a348f | import sqlite3
from datetime import datetime
from os import listdir
import os
import re
import json
import shutil
import pandas as pd
from application_logging.logger import App_Logger
class Prediction_Data_validation:
"""
This class shall be used for handling all the validation done on the Raw Prediction Data!!.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self,path):
self.Batch_Directory = path
self.schema_path = 'schema_prediction.json'
self.logger = App_Logger()
def valuesFromSchema(self):
"""
Method Name: valuesFromSchema
Description: This method extracts all the relevant information from the pre-defined "Schema" file.
Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns
On Failure: Raise ValueError,KeyError,Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
with open(self.schema_path, 'r') as f:
dic = json.load(f)
f.close()
pattern = dic['SampleFileName']
LengthOfDateStampInFile = dic['LengthOfDateStampInFile']
LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']
column_names = dic['ColName']
NumberofColumns = dic['NumberofColumns']
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
message ="LengthOfDateStampInFile:: %s" %LengthOfDateStampInFile + "\t" + "LengthOfTimeStampInFile:: %s" % LengthOfTimeStampInFile +"\t " + "NumberofColumns:: %s" % NumberofColumns + "\n"
self.logger.log(file,message)
file.close()
except ValueError:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file,"ValueError:Value not found inside schema_training.json")
file.close()
raise ValueError
except KeyError:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, "KeyError:Key value error incorrect key passed")
file.close()
raise KeyError
except Exception as e:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, str(e))
file.close()
raise e
return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns
def manualRegexCreation(self):
"""
Method Name: manualRegexCreation
Description: This method contains a manually defined regex based on the "FileName" given in "Schema" file.
This Regex is used to validate the filename of the prediction data.
Output: Regex pattern
On Failure: None
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
regex = "['visibility']+['\_'']+[\d_]+[\d]+\.csv"
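        # Intended to accept batch file names such as "visibility_08012020_120000.csv" (illustrative example);
        # note that the bracketed parts are regex character classes, not literal words.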
return regex
def createDirectoryForGoodBadRawData(self):
"""
Method Name: createDirectoryForGoodBadRawData
Description: This method creates directories to store the Good Data and Bad Data
after validating the prediction data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = os.path.join("Prediction_Raw_Files_Validated/", "Good_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
path = os.path.join("Prediction_Raw_Files_Validated/", "Bad_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
except OSError as ex:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while creating Directory %s:" % ex)
file.close()
raise OSError
def deleteExistingGoodDataTrainingFolder(self):
"""
Method Name: deleteExistingGoodDataTrainingFolder
Description: This method deletes the directory made to store the Good Data
after loading the data in the table. Once the good files are
                      loaded in the DB, deleting the directory ensures space optimization.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Prediction_Raw_Files_Validated/'
# if os.path.isdir("ids/" + userName):
# if os.path.isdir(path + 'Bad_Raw/'):
# shutil.rmtree(path + 'Bad_Raw/')
if os.path.isdir(path + 'Good_Raw/'):
shutil.rmtree(path + 'Good_Raw/')
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"GoodRaw directory deleted successfully!!!")
file.close()
except OSError as s:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def deleteExistingBadDataTrainingFolder(self):
"""
Method Name: deleteExistingBadDataTrainingFolder
Description: This method deletes the directory made to store the bad Data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Prediction_Raw_Files_Validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"BadRaw directory deleted before starting validation!!!")
file.close()
except OSError as s:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def moveBadFilesToArchiveBad(self):
"""
Method Name: moveBadFilesToArchiveBad
Description: This method deletes the directory made to store the Bad Data
after moving the data in an archive folder. We archive the bad
files to send them back to the client for invalid data issue.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
now = datetime.now()
date = now.date()
time = now.strftime("%H%M%S")
try:
path= "PredictionArchivedBadData"
if not os.path.isdir(path):
os.makedirs(path)
source = 'Prediction_Raw_Files_Validated/Bad_Raw/'
dest = 'PredictionArchivedBadData/BadData_' + str(date)+"_"+str(time)
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(source)
for f in files:
if f not in os.listdir(dest):
shutil.move(source + f, dest)
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Bad files moved to archive")
path = 'Prediction_Raw_Files_Validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
self.logger.log(file,"Bad Raw Data Folder Deleted successfully!!")
file.close()
except OSError as e:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file, "Error while moving bad files to archive:: %s" % e)
file.close()
raise OSError
def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):
"""
Method Name: validationFileNameRaw
Description: This function validates the name of the prediction csv file as per given name in the schema!
                     Regex pattern is used to do the validation. If the name format does not match, the file is moved
                     to the Bad Raw Data folder; otherwise it is kept in Good Raw Data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.
self.deleteExistingBadDataTrainingFolder()
self.deleteExistingGoodDataTrainingFolder()
self.createDirectoryForGoodBadRawData()
onlyfiles = [f for f in listdir(self.Batch_Directory)]
try:
f = open("Prediction_Logs/nameValidationLog.txt", 'a+')
for filename in onlyfiles:
if (re.match(regex, filename)):
splitAtDot = re.split('.csv', filename)
splitAtDot = (re.split('_', splitAtDot[0]))
if len(splitAtDot[1]) == LengthOfDateStampInFile:
if len(splitAtDot[2]) == LengthOfTimeStampInFile:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Good_Raw")
self.logger.log(f,"Valid File name!! File moved to GoodRaw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f, "Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
f.close()
except Exception as e:
f = open("Prediction_Logs/nameValidationLog.txt", 'a+')
self.logger.log(f, "Error occured while validating FileName %s" % e)
f.close()
raise e
def validateColumnLength(self,NumberofColumns):
"""
Method Name: validateColumnLength
Description: This function validates the number of columns in the csv files.
                       It should be the same as given in the schema file.
                       If it is not, the file is not suitable for processing and is moved to the Bad Raw Data folder.
                       If the column number matches, the file is kept in Good Raw Data for processing.
                       If the csv file is missing the first column name, this function renames it to "Wafer".
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f,"Column Length Validation Started!!")
for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):
csv = pd.read_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file)
if csv.shape[1] == NumberofColumns:
csv.rename(columns={"Unnamed: 0": "Wafer"}, inplace=True)
csv.to_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file, index=None, header=True)
else:
shutil.move("Prediction_Raw_Files_Validated/Good_Raw/" + file, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f, "Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
self.logger.log(f, "Column Length Validation Completed!!")
except OSError:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
def deletePredictionFile(self):
if os.path.exists('Prediction_Output_File/Predictions.csv'):
os.remove('Prediction_Output_File/Predictions.csv')
def validateMissingValuesInWholeColumn(self):
"""
Method Name: validateMissingValuesInWholeColumn
Description: This function validates if any column in the csv file has all values missing.
If all the values are missing, the file is not suitable for processing.
                     Such files are moved to bad raw data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Missing Values Validation Started!!")
for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):
csv = pd.read_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file)
count = 0
for columns in csv:
if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):
count+=1
shutil.move("Prediction_Raw_Files_Validated/Good_Raw/" + file,
"Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
break
if count==0:
csv.rename(columns={"Unnamed: 0": "Wafer"}, inplace=True)
csv.to_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file, index=None, header=True)
except OSError:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
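# Illustrative call order (assumed paths; schema_prediction.json and the log folders must already exist):
#   validator = Prediction_Data_validation("Prediction_Batch_files/")
#   date_len, time_len, _, n_cols = validator.valuesFromSchema()
#   validator.validationFileNameRaw(validator.manualRegexCreation(), date_len, time_len)
#   validator.validateColumnLength(n_cols)
#   validator.validateMissingValuesInWholeColumn()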
|
py | 1a384dd605d2fca749e81ccc1cb28dc114b846cc | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import unittest
import unittest.mock
from sawtooth_sdk.consensus.zmq_service import ZmqService
from sawtooth_sdk.messaging.future import Future
from sawtooth_sdk.messaging.future import FutureResult
from sawtooth_sdk.protobuf import consensus_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
class TestService(unittest.TestCase):
def setUp(self):
self.mock_stream = unittest.mock.Mock()
self.service = ZmqService(
stream=self.mock_stream,
timeout=10)
def _make_future(self, message_type, content):
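        # Build an already-resolved Future wrapping the serialized protobuf, mimicking what stream.send() returns.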
fut = Future('test')
fut.set_result(FutureResult(
message_type=message_type,
content=content))
return fut
def test_send_to(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_SEND_TO_RESPONSE,
content=consensus_pb2.ConsensusSendToResponse(
status=consensus_pb2.ConsensusSendToResponse.OK
).SerializeToString())
self.service.send_to(
receiver_id=b'receiver_id',
message_type='message_type',
payload=b'payload')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_SEND_TO_REQUEST,
content=consensus_pb2.ConsensusSendToRequest(
message_type='message_type',
content=b'payload',
receiver_id=b'receiver_id').SerializeToString())
def test_broadcast(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_BROADCAST_RESPONSE,
content=consensus_pb2.ConsensusBroadcastResponse(
status=consensus_pb2.ConsensusBroadcastResponse.OK
).SerializeToString())
self.service.broadcast(
message_type='message_type',
payload=b'payload')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_BROADCAST_REQUEST,
content=consensus_pb2.ConsensusBroadcastRequest(
message_type='message_type',
content=b'payload').SerializeToString())
def test_initialize_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_INITIALIZE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusInitializeBlockResponse(
status=consensus_pb2.ConsensusInitializeBlockResponse.OK
).SerializeToString())
self.service.initialize_block(previous_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_INITIALIZE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusInitializeBlockRequest(
previous_id=b'test').SerializeToString())
def test_summarize_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_SUMMARIZE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusSummarizeBlockResponse(
status=consensus_pb2.ConsensusSummarizeBlockResponse.OK,
summary=b'summary').SerializeToString())
result = self.service.summarize_block()
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_SUMMARIZE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusSummarizeBlockRequest()
.SerializeToString())
self.assertEqual(result, b'summary')
def test_finalize_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_FINALIZE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusFinalizeBlockResponse(
status=consensus_pb2.ConsensusFinalizeBlockResponse.OK,
block_id=b'block_id').SerializeToString())
result = self.service.finalize_block(data=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_FINALIZE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusFinalizeBlockRequest(
data=b'test').SerializeToString())
self.assertEqual(result, b'block_id')
def test_cancel_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_CANCEL_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusCancelBlockResponse(
status=consensus_pb2.ConsensusCancelBlockResponse.OK
).SerializeToString())
self.service.cancel_block()
request = consensus_pb2.ConsensusCancelBlockRequest()
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_CANCEL_BLOCK_REQUEST,
content=request.SerializeToString())
def test_check_blocks(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_CHECK_BLOCKS_RESPONSE,
content=consensus_pb2.ConsensusCheckBlocksResponse(
status=consensus_pb2.ConsensusCheckBlocksResponse.OK
).SerializeToString())
self.service.check_blocks(priority=[b'test1', b'test2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_CHECK_BLOCKS_REQUEST,
content=consensus_pb2.ConsensusCheckBlocksRequest(
block_ids=[b'test1', b'test2']).SerializeToString())
def test_commit_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_COMMIT_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusCommitBlockResponse(
status=consensus_pb2.ConsensusCommitBlockResponse.OK
).SerializeToString())
self.service.commit_block(block_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_COMMIT_BLOCK_REQUEST,
content=consensus_pb2.ConsensusCommitBlockRequest(
block_id=b'test').SerializeToString())
def test_ignore_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_IGNORE_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusIgnoreBlockResponse(
status=consensus_pb2.ConsensusIgnoreBlockResponse.OK
).SerializeToString())
self.service.ignore_block(block_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_IGNORE_BLOCK_REQUEST,
content=consensus_pb2.ConsensusIgnoreBlockRequest(
block_id=b'test').SerializeToString())
def test_fail_block(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_FAIL_BLOCK_RESPONSE,
content=consensus_pb2.ConsensusFailBlockResponse(
status=consensus_pb2.ConsensusFailBlockResponse.OK
).SerializeToString())
self.service.fail_block(block_id=b'test')
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_FAIL_BLOCK_REQUEST,
content=consensus_pb2.ConsensusFailBlockRequest(
block_id=b'test').SerializeToString())
def test_get_blocks(self):
block_1 = consensus_pb2.ConsensusBlock(
block_id=b'block1',
previous_id=b'block0',
signer_id=b'signer1',
block_num=1,
payload=b'test1')
block_2 = consensus_pb2.ConsensusBlock(
block_id=b'block2',
previous_id=b'block1',
signer_id=b'signer2',
block_num=2,
payload=b'test2')
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_BLOCKS_GET_RESPONSE,
content=consensus_pb2.ConsensusBlocksGetResponse(
status=consensus_pb2.ConsensusBlocksGetResponse.OK,
blocks=[block_1, block_2]).SerializeToString())
blocks = self.service.get_blocks(block_ids=[b'id1', b'id2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_BLOCKS_GET_REQUEST,
content=consensus_pb2.ConsensusBlocksGetRequest(
block_ids=[b'id1', b'id2']).SerializeToString())
self.assertEqual({
block_id: (
block.previous_id,
block.signer_id,
block.block_num,
block.payload)
for block_id, block in blocks.items()
}, {
b'block1': (b'block0', b'signer1', 1, b'test1'),
b'block2': (b'block1', b'signer2', 2, b'test2'),
})
def test_get_chain_head(self):
block = consensus_pb2.ConsensusBlock(
block_id=b'block',
previous_id=b'block0',
signer_id=b'signer',
block_num=1,
payload=b'test')
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_CHAIN_HEAD_GET_RESPONSE,
content=consensus_pb2.ConsensusChainHeadGetResponse(
status=consensus_pb2.ConsensusChainHeadGetResponse.OK,
block=block).SerializeToString())
chain_head = self.service.get_chain_head()
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_CHAIN_HEAD_GET_REQUEST,
content=consensus_pb2.ConsensusChainHeadGetRequest()
.SerializeToString())
self.assertEqual(chain_head.block_id, b'block')
self.assertEqual(chain_head.previous_id, b'block0')
self.assertEqual(chain_head.signer_id, b'signer')
self.assertEqual(chain_head.block_num, 1)
self.assertEqual(chain_head.payload, b'test')
def test_get_settings(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_SETTINGS_GET_RESPONSE,
content=consensus_pb2.ConsensusSettingsGetResponse(
status=consensus_pb2.ConsensusSettingsGetResponse.OK,
entries=[
consensus_pb2.ConsensusSettingsEntry(
key='key1',
value='value1'),
consensus_pb2.ConsensusSettingsEntry(
key='key2',
value='value2')]).SerializeToString())
entries = self.service.get_settings(
block_id=b'test',
settings=['test1', 'test2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_SETTINGS_GET_REQUEST,
content=consensus_pb2.ConsensusSettingsGetRequest(
block_id=b'test',
keys=['test1', 'test2']).SerializeToString())
self.assertEqual(
entries, {
'key1': 'value1',
'key2': 'value2',
})
def test_get_state(self):
self.mock_stream.send.return_value = self._make_future(
message_type=Message.CONSENSUS_STATE_GET_RESPONSE,
content=consensus_pb2.ConsensusStateGetResponse(
status=consensus_pb2.ConsensusStateGetResponse.OK,
entries=[
consensus_pb2.ConsensusStateEntry(
address='address1',
data=b'data1'),
consensus_pb2.ConsensusStateEntry(
address='address2',
data=b'data2')]).SerializeToString())
entries = self.service.get_state(
block_id=b'test',
addresses=['test1', 'test2'])
self.mock_stream.send.assert_called_with(
message_type=Message.CONSENSUS_STATE_GET_REQUEST,
content=consensus_pb2.ConsensusStateGetRequest(
block_id=b'test',
addresses=['test1', 'test2']).SerializeToString())
self.assertEqual(
entries, {
'address1': b'data1',
'address2': b'data2',
})
|
py | 1a384fb222a8629cc204141841141d1753db5b0e | class PreFlightHttpFailed(Exception):
def __init__(self, error: str = ""):
Exception.__init__(self,f"Http Preflight request failed!Error : {error}") |
py | 1a38502db6f447078abdd713c4c5f59af19ef01b | '''
Function:
    Yinyuetai MV downloader: http://www.yinyuetai.com
Author:
Charles
WeChat official account:
Charles的皮卡丘
'''
import re
import requests
from utils.utils import *
'''
Input:
    --url: URL of the video page
    --savepath: directory where the downloaded video is saved
Output:
    --is_success: BOOL indicating whether the download succeeded
'''
class yinyuetai():
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
self.info_url = 'http://www.yinyuetai.com/insite/get-video-info?flex=true&videoId={}'
    '''Public entry point'''
def get(self, url, savepath='videos'):
video_infos = self.__getvideoinfos(url)
is_success = self.__download(video_infos, savepath)
return is_success
    '''Download the video'''
def __download(self, video_infos, savepath):
checkFolder(savepath)
download_url = video_infos[0]
video_name = 'yinyuetai_' + video_infos[1] + '.mp4'
try:
is_success = downloadBASE(url=download_url, savename=video_name, savepath=savepath, headers=self.headers, stream=True, verify=False)
except:
is_success = False
return is_success
    '''Get the video download URL and id'''
def __getvideoinfos(self, url):
mvid = url.split('/')[-1]
res = requests.get(self.info_url.format(mvid), headers=self.headers)
pattern = re.compile(r'http://\w*?\.yinyuetai\.com/uploads/videos/common/.*?(?=&br)')
re_result = re.findall(pattern, res.text)
        # Choose the best-quality video
download_url = re_result[-1]
video_infos = [download_url, mvid]
return video_infos
'''test'''
if __name__ == '__main__':
url = 'http://v.yinyuetai.com/video/3247548'
yinyuetai().get(url, savepath='videos') |
py | 1a3851d28052e38eebe722b540616b4cd9e16431 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.template.backends.django import copy_exception
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.inspect import func_supports_parameter
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.template_cache = {}
self.find_template_cache = {} # RemovedInDjango20Warning
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super(Loader, self).__init__(engine)
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, template_dirs=None, skip=None):
"""
Perform the caching that gives this loader its name. Often many of the
templates attempted will be missing, so memory use is of concern here.
To keep it in check, caching behavior is a little complicated when a
template is not found. See ticket #26306 for more details.
With template debugging disabled, cache the TemplateDoesNotExist class
for every missing template and raise a new instance of it after
fetching it from the cache.
With template debugging enabled, a unique TemplateDoesNotExist object
is cached for each missing template to preserve debug data. When
raising an exception, Python sets __traceback__, __context__, and
__cause__ attributes on it. Those attributes can contain references to
all sorts of objects up the call chain and caching them creates a
memory leak. Thus, unraised copies of the exceptions are cached and
copies of those copies are raised after they're fetched from the cache.
"""
key = self.cache_key(template_name, template_dirs, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist):
raise cached(template_name)
elif isinstance(cached, TemplateDoesNotExist):
raise copy_exception(cached)
return cached
try:
template = super(Loader, self).get_template(
template_name, template_dirs, skip,
)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = copy_exception(e) if self.engine.debug else TemplateDoesNotExist
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name, template_dirs=None):
for loader in self.loaders:
args = [template_name]
# RemovedInDjango20Warning: Add template_dirs for compatibility
# with old loaders
if func_supports_parameter(loader.get_template_sources, 'template_dirs'):
args.append(template_dirs)
for origin in loader.get_template_sources(*args):
yield origin
def cache_key(self, template_name, template_dirs, skip=None):
"""
Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
dirs_prefix = ''
skip_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
if template_dirs:
dirs_prefix = self.generate_hash(template_dirs)
return '-'.join(filter(bool, [template_name, skip_prefix, dirs_prefix]))
def generate_hash(self, values):
return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()
@property
def supports_recursion(self):
"""
RemovedInDjango20Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return all(hasattr(loader, 'get_contents') for loader in self.loaders)
def find_template(self, name, dirs=None):
"""
RemovedInDjango20Warning: An internal method to lookup the template
name in all the configured loaders.
"""
key = self.cache_key(name, dirs)
try:
result = self.find_template_cache[key]
except KeyError:
result = None
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
except TemplateDoesNotExist:
pass
else:
origin = Origin(
name=display_name,
template_name=name,
loader=loader,
)
result = template, origin
break
self.find_template_cache[key] = result
if result:
return result
else:
self.template_cache[key] = TemplateDoesNotExist
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango20Warning,
)
key = self.cache_key(template_name, template_dirs)
template_tuple = self.template_cache.get(key)
# A cached previous failure:
if template_tuple is TemplateDoesNotExist:
raise TemplateDoesNotExist(template_name)
elif template_tuple is None:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = Template(template, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
self.template_cache[key] = (template, origin)
self.template_cache[key] = (template, None)
return self.template_cache[key]
def reset(self):
"Empty the template cache."
self.template_cache.clear()
self.find_template_cache.clear() # RemovedInDjango20Warning
self.get_template_cache.clear()
|
py | 1a3851e14521ed6899701416e6a50e48c08062c3 | import queue
from enum import Enum
import carla
class Side(Enum):
Top = 1
Bottom = 2
Front = 3
Back = 4
Right = 5
Left = 6
def rotation(self):
if self is Side.Top:
return carla.Rotation(90, 0, 0)
elif self is Side.Bottom:
return carla.Rotation(-90, 0, 0)
elif self is Side.Front:
return carla.Rotation(0, 0, 0)
elif self is Side.Back:
return carla.Rotation(0, 180, 0)
elif self is Side.Right:
return carla.Rotation(0, 90, 0)
elif self is Side.Left:
return carla.Rotation(0, -90, 0)
else:
raise ValueError(f"No known side for {self}")
class SideMap:
def __init__(self):
self.sides = {}
def __setitem__(self, key: Side, value):
q = queue.Queue()
value.listen(q.put)
self.sides[key] = (value, q)
return q
def camera(self, key: Side):
return self.sides[key][0]
def queue(self, key: Side) -> queue.Queue:
return self.sides[key][1]
def has(self):
return all(not x[1].empty() for x in self.sides.values())
def pop(self, frame, timeout=2.0, fn=lambda x: x):
data = {}
for k, v in self.sides.items():
while True:
d = v[1].get(timeout=timeout)
if d.frame == frame:
data[k] = fn(d)
break
return data
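# Minimal usage sketch (assumes a connected client exposing `world`, a spawned `vehicle` and a `camera_bp` blueprint):
#   sides = SideMap()
#   for side in (Side.Front, Side.Back, Side.Left, Side.Right):
#       sides[side] = world.spawn_actor(camera_bp, carla.Transform(carla.Location(z=2.0), side.rotation()), attach_to=vehicle)
#   snapshot = world.wait_for_tick()
#   images = sides.pop(snapshot.frame)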
|
py | 1a385226bc9165760c42b9a9cacaacaabff41bce | from __future__ import unicode_literals
import re
import uuid
from datetime import datetime
from random import random, randint
import pytz
from boto3 import Session
from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from moto.ec2 import ec2_backends
from copy import copy
from .exceptions import ServiceNotFoundException, TaskDefinitionNotFoundException
class BaseObject(BaseModel):
def camelCase(self, key):
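        # e.g. "pending_tasks_count" -> "pendingTasksCount"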
words = []
for i, word in enumerate(key.split("_")):
if i > 0:
words.append(word.title())
else:
words.append(word)
return "".join(words)
def gen_response_object(self):
response_object = copy(self.__dict__)
for key, value in self.__dict__.items():
if "_" in key:
response_object[self.camelCase(key)] = value
del response_object[key]
return response_object
@property
def response_object(self):
return self.gen_response_object()
class Cluster(BaseObject):
def __init__(self, cluster_name, region_name):
self.active_services_count = 0
self.arn = "arn:aws:ecs:{0}:012345678910:cluster/{1}".format(
region_name, cluster_name
)
self.name = cluster_name
self.pending_tasks_count = 0
self.registered_container_instances_count = 0
self.running_tasks_count = 0
self.status = "ACTIVE"
self.region_name = region_name
@property
def physical_resource_id(self):
return self.name
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["clusterArn"] = self.arn
response_object["clusterName"] = self.name
del response_object["arn"], response_object["name"]
return response_object
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# if properties is not provided, cloudformation will use the default values for all properties
if "Properties" in cloudformation_json:
properties = cloudformation_json["Properties"]
else:
properties = {}
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random
# name if necessary
cluster_name=properties.get(
"ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6))
)
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if original_resource.name != properties["ClusterName"]:
ecs_backend = ecs_backends[region_name]
ecs_backend.delete_cluster(original_resource.arn)
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a
# random name if necessary
cluster_name=properties.get(
"ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6))
)
)
else:
# no-op when nothing changed between old and new resources
return original_resource
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
class TaskDefinition(BaseObject):
def __init__(
self,
family,
revision,
container_definitions,
region_name,
network_mode=None,
volumes=None,
tags=None,
placement_constraints=None,
):
self.family = family
self.revision = revision
self.arn = "arn:aws:ecs:{0}:012345678910:task-definition/{1}:{2}".format(
region_name, family, revision
)
self.container_definitions = container_definitions
self.tags = tags if tags is not None else []
if volumes is None:
self.volumes = []
else:
self.volumes = volumes
if network_mode is None:
self.network_mode = "bridge"
else:
self.network_mode = network_mode
self.placement_constraints = (
placement_constraints if placement_constraints is not None else []
)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["taskDefinitionArn"] = response_object["arn"]
del response_object["arn"]
del response_object["tags"]
return response_object
@property
def physical_resource_id(self):
return self.arn
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
family = properties.get(
"Family", "task-definition-{0}".format(int(random() * 10 ** 6))
)
container_definitions = properties["ContainerDefinitions"]
volumes = properties.get("Volumes")
ecs_backend = ecs_backends[region_name]
return ecs_backend.register_task_definition(
family=family, container_definitions=container_definitions, volumes=volumes
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
family = properties.get(
"Family", "task-definition-{0}".format(int(random() * 10 ** 6))
)
container_definitions = properties["ContainerDefinitions"]
volumes = properties.get("Volumes")
if (
original_resource.family != family
or original_resource.container_definitions != container_definitions
or original_resource.volumes != volumes
):
# currently TaskRoleArn isn't stored at TaskDefinition
# instances
ecs_backend = ecs_backends[region_name]
ecs_backend.deregister_task_definition(original_resource.arn)
return ecs_backend.register_task_definition(
family=family,
container_definitions=container_definitions,
volumes=volumes,
)
else:
# no-op when nothing changed between old and new resources
return original_resource
class Task(BaseObject):
def __init__(
self,
cluster,
task_definition,
container_instance_arn,
resource_requirements,
overrides={},
started_by="",
):
self.cluster_arn = cluster.arn
self.task_arn = "arn:aws:ecs:{0}:012345678910:task/{1}".format(
cluster.region_name, str(uuid.uuid4())
)
self.container_instance_arn = container_instance_arn
self.last_status = "RUNNING"
self.desired_status = "RUNNING"
self.task_definition_arn = task_definition.arn
self.overrides = overrides
self.containers = []
self.started_by = started_by
self.stopped_reason = ""
self.resource_requirements = resource_requirements
@property
def response_object(self):
response_object = self.gen_response_object()
return response_object
class Service(BaseObject):
def __init__(
self,
cluster,
service_name,
task_definition,
desired_count,
load_balancers=None,
scheduling_strategy=None,
tags=None,
):
self.cluster_arn = cluster.arn
self.arn = "arn:aws:ecs:{0}:012345678910:service/{1}".format(
cluster.region_name, service_name
)
self.name = service_name
self.status = "ACTIVE"
self.running_count = 0
self.task_definition = task_definition.arn
self.desired_count = desired_count
self.events = []
self.deployments = [
{
"createdAt": datetime.now(pytz.utc),
"desiredCount": self.desired_count,
"id": "ecs-svc/{}".format(randint(0, 32 ** 12)),
"pendingCount": self.desired_count,
"runningCount": 0,
"status": "PRIMARY",
"taskDefinition": task_definition.arn,
"updatedAt": datetime.now(pytz.utc),
}
]
self.load_balancers = load_balancers if load_balancers is not None else []
self.scheduling_strategy = (
scheduling_strategy if scheduling_strategy is not None else "REPLICA"
)
self.tags = tags if tags is not None else []
self.pending_count = 0
@property
def physical_resource_id(self):
return self.arn
@property
def response_object(self):
response_object = self.gen_response_object()
del response_object["name"], response_object["arn"], response_object["tags"]
response_object["serviceName"] = self.name
response_object["serviceArn"] = self.arn
response_object["schedulingStrategy"] = self.scheduling_strategy
for deployment in response_object["deployments"]:
if isinstance(deployment["createdAt"], datetime):
deployment["createdAt"] = unix_time(
deployment["createdAt"].replace(tzinfo=None)
)
if isinstance(deployment["updatedAt"], datetime):
deployment["updatedAt"] = unix_time(
deployment["updatedAt"].replace(tzinfo=None)
)
return response_object
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if isinstance(properties["Cluster"], Cluster):
cluster = properties["Cluster"].name
else:
cluster = properties["Cluster"]
if isinstance(properties["TaskDefinition"], TaskDefinition):
task_definition = properties["TaskDefinition"].family
else:
task_definition = properties["TaskDefinition"]
service_name = "{0}Service{1}".format(cluster, int(random() * 10 ** 6))
desired_count = properties["DesiredCount"]
# TODO: LoadBalancers
# TODO: Role
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_service(
cluster, service_name, task_definition, desired_count
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if isinstance(properties["Cluster"], Cluster):
cluster_name = properties["Cluster"].name
else:
cluster_name = properties["Cluster"]
if isinstance(properties["TaskDefinition"], TaskDefinition):
task_definition = properties["TaskDefinition"].family
else:
task_definition = properties["TaskDefinition"]
desired_count = properties["DesiredCount"]
ecs_backend = ecs_backends[region_name]
service_name = original_resource.name
if original_resource.cluster_arn != Cluster(cluster_name, region_name).arn:
# TODO: LoadBalancers
# TODO: Role
ecs_backend.delete_service(cluster_name, service_name)
new_service_name = "{0}Service{1}".format(
cluster_name, int(random() * 10 ** 6)
)
return ecs_backend.create_service(
cluster_name, new_service_name, task_definition, desired_count
)
else:
return ecs_backend.update_service(
cluster_name, service_name, task_definition, desired_count
)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Name":
return self.name
raise UnformattedGetAttTemplateException()
class ContainerInstance(BaseObject):
def __init__(self, ec2_instance_id, region_name):
self.ec2_instance_id = ec2_instance_id
self.agent_connected = True
self.status = "ACTIVE"
self.registered_resources = [
{
"doubleValue": 0.0,
"integerValue": 4096,
"longValue": 0,
"name": "CPU",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 7482,
"longValue": 0,
"name": "MEMORY",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS",
"stringSetValue": ["22", "2376", "2375", "51678", "51679"],
"type": "STRINGSET",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS_UDP",
"stringSetValue": [],
"type": "STRINGSET",
},
]
self.container_instance_arn = "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format(
region_name, str(uuid.uuid4())
)
self.pending_tasks_count = 0
self.remaining_resources = [
{
"doubleValue": 0.0,
"integerValue": 4096,
"longValue": 0,
"name": "CPU",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 7482,
"longValue": 0,
"name": "MEMORY",
"type": "INTEGER",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS",
"stringSetValue": ["22", "2376", "2375", "51678", "51679"],
"type": "STRINGSET",
},
{
"doubleValue": 0.0,
"integerValue": 0,
"longValue": 0,
"name": "PORTS_UDP",
"stringSetValue": [],
"type": "STRINGSET",
},
]
self.running_tasks_count = 0
self.version_info = {
"agentVersion": "1.0.0",
"agentHash": "4023248",
"dockerVersion": "DockerVersion: 1.5.0",
}
ec2_backend = ec2_backends[region_name]
ec2_instance = ec2_backend.get_instance(ec2_instance_id)
self.attributes = {
"ecs.ami-id": ec2_instance.image_id,
"ecs.availability-zone": ec2_instance.placement,
"ecs.instance-type": ec2_instance.instance_type,
"ecs.os-type": ec2_instance.platform
if ec2_instance.platform == "windows"
else "linux", # options are windows and linux, linux is default
}
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["attributes"] = [
self._format_attribute(name, value)
for name, value in response_object["attributes"].items()
]
return response_object
def _format_attribute(self, name, value):
formatted_attr = {"name": name}
if value is not None:
formatted_attr["value"] = value
return formatted_attr
class ClusterFailure(BaseObject):
def __init__(self, reason, cluster_name, region_name):
self.reason = reason
self.arn = "arn:aws:ecs:{0}:012345678910:cluster/{1}".format(
region_name, cluster_name
)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["reason"] = self.reason
response_object["arn"] = self.arn
return response_object
class ContainerInstanceFailure(BaseObject):
def __init__(self, reason, container_instance_id, region_name):
self.reason = reason
self.arn = "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format(
region_name, container_instance_id
)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object["reason"] = self.reason
response_object["arn"] = self.arn
return response_object
class EC2ContainerServiceBackend(BaseBackend):
def __init__(self, region_name):
super(EC2ContainerServiceBackend, self).__init__()
self.clusters = {}
self.task_definitions = {}
self.tasks = {}
self.services = {}
self.container_instances = {}
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def describe_task_definition(self, task_definition_str):
task_definition_name = task_definition_str.split("/")[-1]
if ":" in task_definition_name:
family, revision = task_definition_name.split(":")
revision = int(revision)
else:
family = task_definition_name
revision = self._get_last_task_definition_revision_id(family)
if (
family in self.task_definitions
and revision in self.task_definitions[family]
):
return self.task_definitions[family][revision]
else:
raise Exception("{0} is not a task_definition".format(task_definition_name))
def create_cluster(self, cluster_name):
cluster = Cluster(cluster_name, self.region_name)
self.clusters[cluster_name] = cluster
return cluster
def list_clusters(self):
"""
maxSize and pagination not implemented
"""
return [cluster.arn for cluster in self.clusters.values()]
def describe_clusters(self, list_clusters_name=None):
list_clusters = []
failures = []
if list_clusters_name is None:
if "default" in self.clusters:
list_clusters.append(self.clusters["default"].response_object)
else:
for cluster in list_clusters_name:
cluster_name = cluster.split("/")[-1]
if cluster_name in self.clusters:
list_clusters.append(self.clusters[cluster_name].response_object)
else:
failures.append(
ClusterFailure("MISSING", cluster_name, self.region_name)
)
return list_clusters, failures
def delete_cluster(self, cluster_str):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
return self.clusters.pop(cluster_name)
else:
raise Exception("{0} is not a cluster".format(cluster_name))
def register_task_definition(
self,
family,
container_definitions,
volumes=None,
network_mode=None,
tags=None,
placement_constraints=None,
):
if family in self.task_definitions:
last_id = self._get_last_task_definition_revision_id(family)
revision = (last_id or 0) + 1
else:
self.task_definitions[family] = {}
revision = 1
task_definition = TaskDefinition(
family,
revision,
container_definitions,
self.region_name,
volumes=volumes,
network_mode=network_mode,
tags=tags,
placement_constraints=placement_constraints,
)
self.task_definitions[family][revision] = task_definition
return task_definition
def list_task_definitions(self, family_prefix):
task_arns = []
for task_definition_list in self.task_definitions.values():
task_arns.extend(
[
task_definition.arn
for task_definition in task_definition_list.values()
if family_prefix is None or task_definition.family == family_prefix
]
)
return task_arns
def deregister_task_definition(self, task_definition_str):
task_definition_name = task_definition_str.split("/")[-1]
family, revision = task_definition_name.split(":")
revision = int(revision)
if (
family in self.task_definitions
and revision in self.task_definitions[family]
):
return self.task_definitions[family].pop(revision)
else:
raise Exception("{0} is not a task_definition".format(task_definition_name))
def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
if cluster_str:
cluster_name = cluster_str.split("/")[-1]
else:
cluster_name = "default"
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
if cluster_name not in self.tasks:
self.tasks[cluster_name] = {}
tasks = []
container_instances = list(
self.container_instances.get(cluster_name, {}).keys()
)
if not container_instances:
raise Exception("No instances found in cluster {}".format(cluster_name))
active_container_instances = [
x
for x in container_instances
if self.container_instances[cluster_name][x].status == "ACTIVE"
]
resource_requirements = self._calculate_task_resource_requirements(
task_definition
)
# TODO: return event about unable to place task if not able to place enough tasks to meet count
placed_count = 0
for container_instance in active_container_instances:
container_instance = self.container_instances[cluster_name][
container_instance
]
container_instance_arn = container_instance.container_instance_arn
try_to_place = True
while try_to_place:
can_be_placed, message = self._can_be_placed(
container_instance, resource_requirements
)
if can_be_placed:
task = Task(
cluster,
task_definition,
container_instance_arn,
resource_requirements,
overrides or {},
started_by or "",
)
self.update_container_instance_resources(
container_instance, resource_requirements
)
tasks.append(task)
self.tasks[cluster_name][task.task_arn] = task
placed_count += 1
if placed_count == count:
return tasks
else:
try_to_place = False
return tasks
@staticmethod
def _calculate_task_resource_requirements(task_definition):
resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []}
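        # e.g. one container definition with cpu=256, memory=512 and a {"hostPort": 80} port mapping
        # accumulates to {"CPU": 256, "MEMORY": 512, "PORTS": [80], "PORTS_UDP": []}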
for container_definition in task_definition.container_definitions:
# cloudformation uses capitalized properties, while boto uses all lower case
# CPU is optional
resource_requirements["CPU"] += container_definition.get(
"cpu", container_definition.get("Cpu", 0)
)
# either memory or memory reservation must be provided
if (
"Memory" in container_definition
or "MemoryReservation" in container_definition
):
resource_requirements["MEMORY"] += container_definition.get(
"Memory", container_definition.get("MemoryReservation")
)
else:
resource_requirements["MEMORY"] += container_definition.get(
"memory", container_definition.get("memoryReservation")
)
port_mapping_key = (
"PortMappings"
if "PortMappings" in container_definition
else "portMappings"
)
for port_mapping in container_definition.get(port_mapping_key, []):
if "hostPort" in port_mapping:
resource_requirements["PORTS"].append(port_mapping.get("hostPort"))
elif "HostPort" in port_mapping:
resource_requirements["PORTS"].append(port_mapping.get("HostPort"))
return resource_requirements
@staticmethod
def _can_be_placed(container_instance, task_resource_requirements):
"""
:param container_instance: The container instance trying to be placed onto
:param task_resource_requirements: The calculated resource requirements of the task in the form of a dict
:return: A boolean stating whether the given container instance has enough resources to have the task placed on
it as well as a description, if it cannot be placed this will describe why.
"""
# TODO: Implement default and other placement strategies as well as constraints:
# docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement.html
remaining_cpu = 0
remaining_memory = 0
reserved_ports = []
for resource in container_instance.remaining_resources:
if resource.get("name") == "CPU":
remaining_cpu = resource.get("integerValue")
elif resource.get("name") == "MEMORY":
remaining_memory = resource.get("integerValue")
elif resource.get("name") == "PORTS":
reserved_ports = resource.get("stringSetValue")
if task_resource_requirements.get("CPU") > remaining_cpu:
return False, "Not enough CPU credits"
if task_resource_requirements.get("MEMORY") > remaining_memory:
return False, "Not enough memory"
ports_needed = task_resource_requirements.get("PORTS")
for port in ports_needed:
if str(port) in reserved_ports:
return False, "Port clash"
return True, "Can be placed"
def start_task(
self,
cluster_str,
task_definition_str,
container_instances,
overrides,
started_by,
):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
if cluster_name not in self.tasks:
self.tasks[cluster_name] = {}
tasks = []
if not container_instances:
raise Exception("No container instance list provided")
container_instance_ids = [x.split("/")[-1] for x in container_instances]
resource_requirements = self._calculate_task_resource_requirements(
task_definition
)
for container_instance_id in container_instance_ids:
container_instance = self.container_instances[cluster_name][
container_instance_id
]
task = Task(
cluster,
task_definition,
container_instance.container_instance_arn,
resource_requirements,
overrides or {},
started_by or "",
)
tasks.append(task)
self.update_container_instance_resources(
container_instance, resource_requirements
)
self.tasks[cluster_name][task.task_arn] = task
return tasks
def describe_tasks(self, cluster_str, tasks):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
if not tasks:
raise Exception("tasks cannot be empty")
response = []
for cluster, cluster_tasks in self.tasks.items():
for task_arn, task in cluster_tasks.items():
task_id = task_arn.split("/")[-1]
if (
task_arn in tasks
or task.task_arn in tasks
or any(task_id in task for task in tasks)
):
response.append(task)
return response
def list_tasks(
self,
cluster_str,
container_instance,
family,
started_by,
service_name,
desiredStatus,
):
filtered_tasks = []
for cluster, tasks in self.tasks.items():
for arn, task in tasks.items():
filtered_tasks.append(task)
if cluster_str:
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
filtered_tasks = list(
filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks)
)
if container_instance:
filtered_tasks = list(
filter(
lambda t: container_instance in t.container_instance_arn,
filtered_tasks,
)
)
if started_by:
filtered_tasks = list(
filter(lambda t: started_by == t.started_by, filtered_tasks)
)
return [t.task_arn for t in filtered_tasks]
def stop_task(self, cluster_str, task_str, reason):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
if not task_str:
raise Exception("A task ID or ARN is required")
task_id = task_str.split("/")[-1]
tasks = self.tasks.get(cluster_name, None)
if not tasks:
raise Exception("Cluster {} has no registered tasks".format(cluster_name))
for task in tasks.keys():
if task.endswith(task_id):
container_instance_arn = tasks[task].container_instance_arn
container_instance = self.container_instances[cluster_name][
container_instance_arn.split("/")[-1]
]
self.update_container_instance_resources(
container_instance, tasks[task].resource_requirements, removing=True
)
tasks[task].last_status = "STOPPED"
tasks[task].desired_status = "STOPPED"
tasks[task].stopped_reason = reason
return tasks[task]
raise Exception(
"Could not find task {} on cluster {}".format(task_str, cluster_name)
)
def create_service(
self,
cluster_str,
service_name,
task_definition_str,
desired_count,
load_balancers=None,
scheduling_strategy=None,
tags=None,
):
cluster_name = cluster_str.split("/")[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
desired_count = desired_count if desired_count is not None else 0
service = Service(
cluster,
service_name,
task_definition,
desired_count,
load_balancers,
scheduling_strategy,
tags,
)
cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
self.services[cluster_service_pair] = service
return service
def list_services(self, cluster_str, scheduling_strategy=None):
cluster_name = cluster_str.split("/")[-1]
service_arns = []
for key, value in self.services.items():
if cluster_name + ":" in key:
service = self.services[key]
if (
scheduling_strategy is None
or service.scheduling_strategy == scheduling_strategy
):
service_arns.append(service.arn)
return sorted(service_arns)
def describe_services(self, cluster_str, service_names_or_arns):
cluster_name = cluster_str.split("/")[-1]
result = []
for existing_service_name, existing_service_obj in sorted(
self.services.items()
):
for requested_name_or_arn in service_names_or_arns:
cluster_service_pair = "{0}:{1}".format(
cluster_name, requested_name_or_arn
)
if (
cluster_service_pair == existing_service_name
or existing_service_obj.arn == requested_name_or_arn
):
result.append(existing_service_obj)
return result
def update_service(
self, cluster_str, service_name, task_definition_str, desired_count
):
cluster_name = cluster_str.split("/")[-1]
cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
if cluster_service_pair in self.services:
if task_definition_str is not None:
self.describe_task_definition(task_definition_str)
self.services[
cluster_service_pair
].task_definition = task_definition_str
if desired_count is not None:
self.services[cluster_service_pair].desired_count = desired_count
return self.services[cluster_service_pair]
else:
raise ServiceNotFoundException(service_name)
def delete_service(self, cluster_name, service_name):
cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
if cluster_service_pair in self.services:
service = self.services[cluster_service_pair]
if service.desired_count > 0:
raise Exception("Service must have desiredCount=0")
else:
return self.services.pop(cluster_service_pair)
else:
raise Exception(
"cluster {0} or service {1} does not exist".format(
cluster_name, service_name
)
)
def register_container_instance(self, cluster_str, ec2_instance_id):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
container_instance = ContainerInstance(ec2_instance_id, self.region_name)
if not self.container_instances.get(cluster_name):
self.container_instances[cluster_name] = {}
container_instance_id = container_instance.container_instance_arn.split("/")[-1]
self.container_instances[cluster_name][
container_instance_id
] = container_instance
self.clusters[cluster_name].registered_container_instances_count += 1
return container_instance
def list_container_instances(self, cluster_str):
cluster_name = cluster_str.split("/")[-1]
container_instances_values = self.container_instances.get(
cluster_name, {}
).values()
container_instances = [
ci.container_instance_arn for ci in container_instances_values
]
return sorted(container_instances)
def describe_container_instances(self, cluster_str, list_container_instance_ids):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
if not list_container_instance_ids:
raise JsonRESTError(
"InvalidParameterException", "Container instance cannot be empty"
)
failures = []
container_instance_objects = []
for container_instance_id in list_container_instance_ids:
container_instance_id = container_instance_id.split("/")[-1]
container_instance = self.container_instances[cluster_name].get(
container_instance_id, None
)
if container_instance is not None:
container_instance_objects.append(container_instance)
else:
failures.append(
ContainerInstanceFailure(
"MISSING", container_instance_id, self.region_name
)
)
return container_instance_objects, failures
def update_container_instances_state(
self, cluster_str, list_container_instance_ids, status
):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
status = status.upper()
if status not in ["ACTIVE", "DRAINING"]:
raise Exception(
"An error occurred (InvalidParameterException) when calling the UpdateContainerInstancesState operation: \
Container instances status should be one of [ACTIVE,DRAINING]"
)
failures = []
container_instance_objects = []
list_container_instance_ids = [
x.split("/")[-1] for x in list_container_instance_ids
]
for container_instance_id in list_container_instance_ids:
container_instance = self.container_instances[cluster_name].get(
container_instance_id, None
)
if container_instance is not None:
container_instance.status = status
container_instance_objects.append(container_instance)
else:
failures.append(
ContainerInstanceFailure(
"MISSING", container_instance_id, self.region_name
)
)
return container_instance_objects, failures
def update_container_instance_resources(
self, container_instance, task_resources, removing=False
):
resource_multiplier = 1
if removing:
resource_multiplier = -1
for resource in container_instance.remaining_resources:
if resource.get("name") == "CPU":
resource["integerValue"] -= (
task_resources.get("CPU") * resource_multiplier
)
elif resource.get("name") == "MEMORY":
resource["integerValue"] -= (
task_resources.get("MEMORY") * resource_multiplier
)
elif resource.get("name") == "PORTS":
for port in task_resources.get("PORTS"):
if removing:
resource["stringSetValue"].remove(str(port))
else:
resource["stringSetValue"].append(str(port))
container_instance.running_tasks_count += resource_multiplier * 1
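    # Sketch of the bookkeeping above (numbers are hypothetical): placing a task that needs
    # {"CPU": 256, "MEMORY": 512, "PORTS": [80]} subtracts 256 and 512 from the instance's CPU
    # and MEMORY integerValue entries, appends "80" to the PORTS stringSetValue and increments
    # running_tasks_count; calling the same method with removing=True reverses every step.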
def deregister_container_instance(self, cluster_str, container_instance_str, force):
failures = []
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
container_instance_id = container_instance_str.split("/")[-1]
container_instance = self.container_instances[cluster_name].get(
container_instance_id
)
if container_instance is None:
raise Exception("{0} is not a container id in the cluster")
if not force and container_instance.running_tasks_count > 0:
raise Exception("Found running tasks on the instance.")
# Currently assume that people might want to do something based around deregistered instances
# with tasks left running on them - but nothing if no tasks were running already
elif force and container_instance.running_tasks_count > 0:
if not self.container_instances.get("orphaned"):
self.container_instances["orphaned"] = {}
self.container_instances["orphaned"][
container_instance_id
] = container_instance
del self.container_instances[cluster_name][container_instance_id]
self._respond_to_cluster_state_update(cluster_str)
return container_instance, failures
def _respond_to_cluster_state_update(self, cluster_str):
cluster_name = cluster_str.split("/")[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
pass
def put_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError(
"ClusterNotFoundException", "Cluster not found", status=400
)
if attributes is None:
raise JsonRESTError(
"InvalidParameterException", "attributes value is required"
)
for attr in attributes:
self._put_attribute(
cluster_name,
attr["name"],
attr.get("value"),
attr.get("targetId"),
attr.get("targetType"),
)
def _put_attribute(
self, cluster_name, name, value=None, target_id=None, target_type=None
):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
instance.attributes[name] = value
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit("/", 1)[-1]
self.container_instances[cluster_name][arn].attributes[name] = value
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != "container-instance":
raise JsonRESTError(
"TargetNotFoundException",
"Could not find {0}".format(target_id),
)
self.container_instances[cluster_name][target_id].attributes[
name
] = value
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
def list_attributes(
self,
target_type,
cluster_name=None,
attr_name=None,
attr_value=None,
max_results=None,
next_token=None,
):
if target_type != "container-instance":
raise JsonRESTError(
"InvalidParameterException", "targetType must be container-instance"
)
filters = [lambda x: True]
# item will be {0 cluster_name, 1 arn, 2 name, 3 value}
if cluster_name is not None:
filters.append(lambda item: item[0] == cluster_name)
if attr_name:
filters.append(lambda item: item[2] == attr_name)
        if attr_value:
filters.append(lambda item: item[3] == attr_value)
all_attrs = []
for cluster_name, cobj in self.container_instances.items():
for container_instance in cobj.values():
for key, value in container_instance.attributes.items():
all_attrs.append(
(
cluster_name,
container_instance.container_instance_arn,
key,
value,
)
)
return filter(lambda x: all(f(x) for f in filters), all_attrs)
def delete_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError(
"ClusterNotFoundException", "Cluster not found", status=400
)
if attributes is None:
raise JsonRESTError(
"InvalidParameterException", "attributes value is required"
)
for attr in attributes:
self._delete_attribute(
cluster_name,
attr["name"],
attr.get("value"),
attr.get("targetId"),
attr.get("targetType"),
)
def _delete_attribute(
self, cluster_name, name, value=None, target_id=None, target_type=None
):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit("/", 1)[-1]
instance = self.container_instances[cluster_name][arn]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != "container-instance":
raise JsonRESTError(
"TargetNotFoundException",
"Could not find {0}".format(target_id),
)
instance = self.container_instances[cluster_name][target_id]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError(
"TargetNotFoundException", "Could not find {0}".format(target_id)
)
def list_task_definition_families(
self, family_prefix=None, status=None, max_results=None, next_token=None
):
for task_fam in self.task_definitions:
if family_prefix is not None and not task_fam.startswith(family_prefix):
continue
yield task_fam
@staticmethod
def _parse_resource_arn(resource_arn):
match = re.match(
"^arn:aws:ecs:(?P<region>[^:]+):(?P<account_id>[^:]+):(?P<service>[^:]+)/(?P<id>.*)$",
resource_arn,
)
if not match:
raise JsonRESTError(
"InvalidParameterException", "The ARN provided is invalid."
)
return match.groupdict()
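    # Example of the parse above (the ARN is hypothetical):
    #     _parse_resource_arn("arn:aws:ecs:us-east-1:012345678910:service/my-service")
    #     -> {"region": "us-east-1", "account_id": "012345678910",
    #         "service": "service", "id": "my-service"}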
def list_tags_for_resource(self, resource_arn):
"""Currently implemented only for task definitions and services"""
parsed_arn = self._parse_resource_arn(resource_arn)
if parsed_arn["service"] == "task-definition":
for task_definition in self.task_definitions.values():
for revision in task_definition.values():
if revision.arn == resource_arn:
return revision.tags
else:
raise TaskDefinitionNotFoundException()
elif parsed_arn["service"] == "service":
for service in self.services.values():
if service.arn == resource_arn:
return service.tags
else:
raise ServiceNotFoundException(service_name=parsed_arn["id"])
raise NotImplementedError()
def _get_last_task_definition_revision_id(self, family):
definitions = self.task_definitions.get(family, {})
if definitions:
return max(definitions.keys())
def tag_resource(self, resource_arn, tags):
"""Currently implemented only for services"""
parsed_arn = self._parse_resource_arn(resource_arn)
if parsed_arn["service"] == "service":
for service in self.services.values():
if service.arn == resource_arn:
service.tags = self._merge_tags(service.tags, tags)
return {}
else:
raise ServiceNotFoundException(service_name=parsed_arn["id"])
raise NotImplementedError()
def _merge_tags(self, existing_tags, new_tags):
merged_tags = new_tags
new_keys = self._get_keys(new_tags)
for existing_tag in existing_tags:
if existing_tag["key"] not in new_keys:
merged_tags.append(existing_tag)
return merged_tags
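    # Example of the merge above (tags are hypothetical): merging existing
    # [{"key": "env", "value": "prod"}, {"key": "team", "value": "a"}] with new
    # [{"key": "env", "value": "dev"}] yields
    # [{"key": "env", "value": "dev"}, {"key": "team", "value": "a"}] -- new values win on
    # key collisions and untouched existing tags are kept.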
@staticmethod
def _get_keys(tags):
return [tag["key"] for tag in tags]
def untag_resource(self, resource_arn, tag_keys):
"""Currently implemented only for services"""
parsed_arn = self._parse_resource_arn(resource_arn)
if parsed_arn["service"] == "service":
for service in self.services.values():
if service.arn == resource_arn:
service.tags = [
tag for tag in service.tags if tag["key"] not in tag_keys
]
return {}
else:
raise ServiceNotFoundException(service_name=parsed_arn["id"])
raise NotImplementedError()
ecs_backends = {}
for region in Session().get_available_regions("ecs"):
ecs_backends[region] = EC2ContainerServiceBackend(region)
for region in Session().get_available_regions("ecs", partition_name="aws-us-gov"):
ecs_backends[region] = EC2ContainerServiceBackend(region)
for region in Session().get_available_regions("ecs", partition_name="aws-cn"):
ecs_backends[region] = EC2ContainerServiceBackend(region)
|
py | 1a38543343b2af6b27f3d7363761789362dd4473 | """
AES IGE implementation in Python.
If available, tgcrypto will be used instead, otherwise
if available, cryptg will be used instead, otherwise
if available, libssl will be used instead, otherwise
the Python implementation will be used.
"""
import os
import pyaes
import logging
from . import libssl
__log__ = logging.getLogger(__name__)
try:
import tgcrypto
__log__.debug('tgcrypto detected, it will be used for encryption')
except ImportError:
tgcrypto = None
try:
import cryptg
__log__.debug('cryptg detected, it will be used for encryption')
except ImportError:
cryptg = None
if libssl.encrypt_ige and libssl.decrypt_ige:
__log__.debug('libssl detected, it will be used for encryption')
else:
__log__.debug('tgcrypto or cryptg modules not installed and libssl not found, '
'falling back to (slower) Python encryption')
class AES:
"""
    Class that serves as an interface to encrypt and decrypt
text through the AES IGE mode.
"""
@staticmethod
def decrypt_ige(cipher_text, key, iv):
"""
Decrypts the given text in 16-bytes blocks by using the
given key and 32-bytes initialization vector.
"""
if tgcrypto:
return tgcrypto.ige256_decrypt(cipher_text, key, iv)
if cryptg:
return cryptg.decrypt_ige(cipher_text, key, iv)
if libssl.decrypt_ige:
return libssl.decrypt_ige(cipher_text, key, iv)
iv1 = iv[:len(iv) // 2]
iv2 = iv[len(iv) // 2:]
aes = pyaes.AES(key)
plain_text = []
blocks_count = len(cipher_text) // 16
cipher_text_block = [0] * 16
for block_index in range(blocks_count):
for i in range(16):
cipher_text_block[i] = \
cipher_text[block_index * 16 + i] ^ iv2[i]
plain_text_block = aes.decrypt(cipher_text_block)
for i in range(16):
plain_text_block[i] ^= iv1[i]
iv1 = cipher_text[block_index * 16:block_index * 16 + 16]
iv2 = plain_text_block
plain_text.extend(plain_text_block)
return bytes(plain_text)
@staticmethod
def encrypt_ige(plain_text, key, iv):
"""
Encrypts the given text in 16-bytes blocks by using the
given key and 32-bytes initialization vector.
"""
padding = len(plain_text) % 16
if padding:
plain_text += os.urandom(16 - padding)
if tgcrypto:
return tgcrypto.ige256_encrypt(plain_text, key, iv)
if cryptg:
return cryptg.encrypt_ige(plain_text, key, iv)
if libssl.encrypt_ige:
return libssl.encrypt_ige(plain_text, key, iv)
iv1 = iv[:len(iv) // 2]
iv2 = iv[len(iv) // 2:]
aes = pyaes.AES(key)
cipher_text = []
blocks_count = len(plain_text) // 16
for block_index in range(blocks_count):
plain_text_block = list(
plain_text[block_index * 16:block_index * 16 + 16]
)
for i in range(16):
plain_text_block[i] ^= iv1[i]
cipher_text_block = aes.encrypt(plain_text_block)
for i in range(16):
cipher_text_block[i] ^= iv2[i]
iv1 = cipher_text_block
iv2 = plain_text[block_index * 16:block_index * 16 + 16]
cipher_text.extend(cipher_text_block)
return bytes(cipher_text)
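# Minimal usage sketch (the key and iv below are freshly generated placeholders, not real secrets):
#
#     key = os.urandom(32)   # 256-bit AES key
#     iv = os.urandom(32)    # 32-byte IGE initialization vector
#     cipher = AES.encrypt_ige(b'hello world!!!16', key, iv)
#     assert AES.decrypt_ige(cipher, key, iv) == b'hello world!!!16'  # exactly 16 bytes, so no padding is added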
|
py | 1a3856e90261d7f7e48d22ad1b4aad1f46c8643a | """edsr_slim.py"""
# import mindspore
from src import common
# from src.edsr_model import Upsampler, default_conv
import mindspore.nn as nn
import mindspore.ops as ops
# import mindspore.ops.operations as P
from mindspore import Tensor
class EDSR(nn.Cell):
"""[EDSR]
Args:
nn ([type]): [description]
"""
def __init__(self, args):
super(EDSR, self).__init__()
self.n_colors = args.n_colors
n_resblocks = args.n_resblocks
self.n_feats = args.n_feats
self.kernel_size = 3
scale = args.scale[0]
act = nn.ReLU()
self.rgb_range = args.rgb_range
# self.head = nn.Conv2d(in_channels=args.n_colors, out_channels=self.n_feats, kernel_size=self.kernel_size, pad_mode='pad', padding=self.kernel_size // 2, has_bias=True)
self.head = common.conv(args.n_colors, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
m_body = [
common.ResidualBlock(
self.n_feats, self.kernel_size, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
self.body = nn.CellList(m_body)
        # self.body = m_body  # if this line were used instead, the parameters of the body blocks would not be trained
self.body_conv = common.conv(self.n_feats, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
self.upsampler = common.Upsampler(scale, self.n_feats)
self.tail_conv = common.conv(self.n_feats, args.n_colors, self.kernel_size, padding=self.kernel_size//2)
def construct(self, x, width_mult=Tensor(1.0)):
"""construct"""
# def construct(self, x, width_mult):
width_mult = width_mult.asnumpy().item()
feature_width = int(self.n_feats * width_mult)
conv2d = ops.Conv2D(out_channel=feature_width, kernel_size=self.kernel_size, mode=1, pad_mode='pad',
pad=self.kernel_size // 2)
biasadd = ops.BiasAdd()
x = common.mean_shift(x, self.rgb_range)
        # originally written as weight.clone()[...]
weight = self.head.weight[:feature_width, :self.n_colors, :, :]
bias = self.head.bias[:feature_width]
x = conv2d(x, weight)
x = biasadd(x, bias)
residual = x
for block in self.body:
residual = block(residual, width_mult)
weight = self.body_conv.weight[:feature_width, :feature_width, :, :]
bias = self.body_conv.bias[:feature_width]
residual = conv2d(residual, weight)
residual = biasadd(residual, bias)
residual += x
x = self.upsampler(residual, width_mult)
weight = self.tail_conv.weight[:self.n_colors, :feature_width, :, :]
bias = self.tail_conv.bias[:self.n_colors]
conv2d = ops.Conv2D(out_channel=self.n_colors, kernel_size=self.kernel_size,
mode=1, pad_mode='pad', pad=self.kernel_size//2)
x = conv2d(x, weight)
x = biasadd(x, bias)
x = common.mean_shift(x, self.rgb_range, sign=1)
return x
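# Usage sketch (illustrative; `args` must carry the fields read in __init__ above, and `lr_batch`
# is a hypothetical low-resolution input tensor):
#
#     net = EDSR(args)
#     sr = net(lr_batch, Tensor(0.5))   # run the slimmed network with half of n_feats channels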
|
py | 1a385735eb65ed3583fc6cca0b83e8a01fc9eafa | #!/usr/bin/env python3
# coding: UTF-8
from configparser import ConfigParser
from contextlib import contextmanager
import os
import datetime
from os.path import abspath, basename, exists, dirname, join, isdir, expanduser
import platform
import sys
import subprocess
import time
import logging
import logging.config
import click
import termcolor
import colorlog
import pymysql
import telnetlib
logger = logging.getLogger('.utils')
DEBUG_ENABLED = os.environ.get('SEAFILE_DOCKER_VERBOSE', '').lower() in ('true', '1', 'yes')
def eprint(*a, **kw):
kw['file'] = sys.stderr
print(*a, **kw)
def identity(msg, *a, **kw):
return msg
colored = identity if not os.isatty(sys.stdin.fileno()) else termcolor.colored
red = lambda s: colored(s, 'red')
green = lambda s: colored(s, 'green')
def underlined(msg):
return '\x1b[4m{}\x1b[0m'.format(msg)
def sudo(*a, **kw):
call('sudo ' + a[0], *a[1:], **kw)
def _find_flag(args, *opts, **kw):
is_flag = kw.get('is_flag', False)
if is_flag:
return any([opt in args for opt in opts])
else:
for opt in opts:
try:
return args[args.index(opt) + 1]
except ValueError:
pass
def call(*a, **kw):
dry_run = kw.pop('dry_run', False)
quiet = kw.pop('quiet', DEBUG_ENABLED)
cwd = kw.get('cwd', os.getcwd())
check_call = kw.pop('check_call', True)
reduct_args = kw.pop('reduct_args', [])
if not quiet:
toprint = a[0]
args = [x.strip('"') for x in a[0].split() if '=' not in x]
for arg in reduct_args:
value = _find_flag(args, arg)
toprint = toprint.replace(value, '{}**reducted**'.format(value[:3]))
logdbg('calling: ' + green(toprint))
logdbg('cwd: ' + green(cwd))
kw.setdefault('shell', True)
if not dry_run:
if check_call:
return subprocess.check_call(*a, **kw)
else:
return subprocess.Popen(*a, **kw).wait()
@contextmanager
def cd(path):
path = expanduser(path)
olddir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(olddir)
def must_makedir(p):
p = expanduser(p)
if not exists(p):
logger.info('created folder %s', p)
os.makedirs(p)
else:
logger.debug('folder %s already exists', p)
def setup_colorlog():
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'colored': {
'()': 'colorlog.ColoredFormatter',
'format': "%(log_color)s[%(asctime)s]%(reset)s %(blue)s%(message)s",
'datefmt': '%m/%d/%Y %H:%M:%S',
},
},
'handlers': {
'default': {
'level': 'INFO',
'formatter': 'colored',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'django.request': {
'handlers': ['default'],
'level': 'WARN',
'propagate': False
},
}
})
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def setup_logging(level=logging.INFO):
kw = {
'format': '[%(asctime)s][%(module)s]: %(message)s',
'datefmt': '%m/%d/%Y %H:%M:%S',
'level': level,
'stream': sys.stdout
}
logging.basicConfig(**kw)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def get_process_cmd(pid, env=False):
env = 'e' if env else ''
try:
return subprocess.check_output('ps {} -o command {}'.format(env, pid),
shell=True).decode('utf8').strip().splitlines()[1]
# except Exception, e:
# print(e)
    except Exception:
return None
def get_match_pids(pattern):
pgrep_output = subprocess.check_output(
'pgrep -f "{}" || true'.format(pattern),
shell=True).decode('utf8').strip()
return [int(pid) for pid in pgrep_output.splitlines()]
def ask_for_confirm(msg):
confirm = click.prompt(msg, default='Y')
return confirm.lower() in ('y', 'yes')
def confirm_command_to_run(cmd):
if ask_for_confirm('Run the command: {} ?'.format(green(cmd))):
call(cmd)
else:
sys.exit(1)
def git_current_commit():
return get_command_output('git rev-parse --short HEAD').strip()
def get_command_output(cmd):
shell = not isinstance(cmd, list)
return subprocess.check_output(cmd, shell=shell).decode('utf8')
def ask_yes_or_no(msg, prompt='', default=None):
print('\n' + msg + '\n')
while True:
answer = input(prompt + ' [yes/no] ').lower()
if not answer:
continue
if answer not in ('yes', 'no', 'y', 'n'):
continue
if answer in ('yes', 'y'):
return True
else:
return False
def git_branch_exists(branch):
return call('git rev-parse --short --verify {}'.format(branch)) == 0
def to_unicode(s):
    if isinstance(s, bytes):
return s.decode('utf-8')
else:
return s
def to_utf8(s):
    if isinstance(s, str):
return s.encode('utf-8')
else:
return s
def git_commit_time(refspec):
return int(get_command_output('git log -1 --format="%ct" {}'.format(
refspec)).strip())
def get_seafile_version():
return os.environ['SEAFILE_VERSION']
def get_install_dir():
return join('/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-{}'.format(get_seafile_version()))
def get_script(script):
return join(get_install_dir(), script)
_config = None
def get_conf(key, default=None):
key = key.upper()
return os.environ.get(key, default)
def _add_default_context(context):
default_context = {
'current_timestr': datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S'),
}
for k in default_context:
context.setdefault(k, default_context[k])
def render_template(template, target, context):
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(dirname(template)))
_add_default_context(context)
content = env.get_template(basename(template)).render(**context)
with open(target, 'w') as fp:
fp.write(content)
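# Usage sketch (the paths and context keys below are illustrative, not fixed by this module):
#
#     render_template('/templates/seafile.nginx.conf.template',
#                     '/etc/nginx/sites-enabled/seafile.nginx.conf',
#                     {'https': False, 'domain': 'example.com'})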
def logdbg(msg):
if DEBUG_ENABLED:
msg = '[debug] ' + msg
loginfo(msg)
def loginfo(msg):
msg = '[{}] {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), green(msg))
eprint(msg)
def cert_has_valid_days(cert, days):
assert exists(cert)
secs = 86400 * int(days)
retcode = call('openssl x509 -checkend {} -noout -in {}'.format(secs, cert), check_call=False)
return retcode == 0
def get_version_stamp_file():
return '/shared/seafile/seafile-data/current_version'
def read_version_stamp(fn=get_version_stamp_file()):
assert exists(fn), 'version stamp file {} does not exist!'.format(fn)
with open(fn, 'r') as fp:
return fp.read().strip()
def update_version_stamp(version, fn=get_version_stamp_file()):
with open(fn, 'w') as fp:
fp.write(version + '\n')
def wait_for_mysql():
db_host = get_conf('DB_HOST', '127.0.0.1')
db_user = 'root'
db_passwd = get_conf('DB_ROOT_PASSWD', '')
while True:
try:
pymysql.connect(host=db_host, port=3306, user=db_user, passwd=db_passwd)
except Exception as e:
            print('waiting for mysql server to be ready: %s' % e)
time.sleep(2)
continue
logdbg('mysql server is ready')
return
def wait_for_memcached():
while True:
try:
with telnetlib.Telnet(host='memcached', port=11211, timeout=3) as tn:
pass
except Exception as e:
            print('waiting for memcached to be ready: %s' % e)
time.sleep(2)
continue
logdbg('memcached is ready')
return
def wait_for_nginx():
while True:
logdbg('waiting for nginx server to be ready')
output = get_command_output('netstat -nltp')
if ':80 ' in output:
logdbg(output)
logdbg('nginx is ready')
return
time.sleep(2)
def replace_file_pattern(fn, pattern, replacement):
with open(fn, 'r') as fp:
content = fp.read()
with open(fn, 'w') as fp:
fp.write(content.replace(pattern, replacement))
|
py | 1a3857e589eafd1337580ded019de362257897fb | # File: taniumthreatresponse_consts.py
# Copyright (c) 2020-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
# Constants relating to '_get_error_message_from_exception'
ERR_CODE_MSG = "Error code unavailable"
ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
TYPE_ERR_MSG = "Error occurred while connecting to the Tanium Threat Response Server. Please check the asset configuration and|or the action parameters"
# Constants relating to '_validate_integer'
VALID_INTEGER_MSG = "Please provide a valid integer value in the {}"
NON_NEGATIVE_INTEGER_MSG = "Please provide a valid non-negative integer value in the {}"
NON_ZERO_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
CONNTIMEOUT_KEY = "'conntimeout' action parameter"
PROCESS_TABLE_ID_KEY = "'process_table_id' action parameter"
LIMIT_KEY = "'limit' action parameter"
OFFSET_KEY = "'offset' action parameter"
FILE_ID_KEY = "'file_id' action parameter"
INTEL_DOC_ID_KEY = "'intel_doc_id' action parameter"
# Constants relating to value_list check
DSTTYPE_VALUE_LIST = ["computer_name", "ip_address"]
EVENT_TYPE_VALUE_LIST = ["combined", "dns", "driver", "file", "network", "process", "registry", "sid", "image"]
FILTER_TYPE_VALUE_LIST = ["any", "all"]
|
py | 1a3857fd62f36c8cbbaba1098577396963c7e7e0 | def count_digits(n):
if(n == 0):
return n
if(n < 0):
n *= -1
if(n < 10):
return 1
return 1 + count_digits(n/10) |
py | 1a38589f9ab4309e26dd998d134729f903d22721 | #
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_stock_list
#from QUANTAXIS.QASU import crawl_eastmoney as crawl_eastmoney_file
from QUANTAXIS.QASU import save_tdx as stdx
from QUANTAXIS.QASU import save_tdx_file as tdx_file
from QUANTAXIS.QASU import save_gm as sgm
from QUANTAXIS.QASU import save_jq as sjq
from QUANTAXIS.QASU import save_tushare as sts
from QUANTAXIS.QASU import save_financialfiles
from QUANTAXIS.QAUtil import DATABASE
# from QUANTAXIS.QASU import crawl_jrj_financial_reportdate as save_financial_calendar
# from QUANTAXIS.QASU import crawl_jrj_stock_divyield as save_stock_divyield
def QA_SU_save_stock_info(engine, client=DATABASE):
"""save stock info
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_info(client=client)
def QA_SU_save_stock_info_tushare(engine="tushare", client=DATABASE):
'''
:param engine: tushare
:param client:
:return: None
'''
# only support the tushare
engine = select_save_engine("tushare")
engine.QA_SU_save_stock_info_tushare()
pass
def QA_SU_save_stock_list(engine, client=DATABASE):
"""save stock_list
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_list(client=client)
def QA_SU_save_index_list(engine, client=DATABASE):
"""save index_list
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_index_list(client=client)
def QA_SU_save_future_list(engine, client=DATABASE):
"""save future_list
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_list(client=client)
def QA_SU_save_future_day(engine, client=DATABASE):
"""save future_day
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_day(client=client)
def QA_SU_save_future_day_all(engine, client=DATABASE):
"""save future_day_all
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_day_all(client=client)
def QA_SU_save_future_min(engine, client=DATABASE):
"""save future_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_min(client=client)
def QA_SU_save_future_min_all(engine, client=DATABASE):
"""[summary]
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_min_all(client=client)
def QA_SU_save_stock_day(engine, client=DATABASE):
"""save stock_day
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_day(client=client)
def QA_SU_save_option_contract_list(engine, client=DATABASE):
'''
:param engine:
:param client:
:return:
'''
engine = select_save_engine(engine)
engine.QA_SU_save_option_contract_list(client=client)
def QA_SU_save_option_day(engine, client=DATABASE):
'''
:param engine:
:param client:
:return:
'''
engine = select_save_engine(engine)
engine.QA_SU_save_option_day(client=client)
def QA_SU_save_option_min(engine, client=DATABASE):
'''
:param engine:
:param client:
:return:
'''
engine = select_save_engine(engine)
engine.QA_SU_save_option_min(client=client)
def QA_SU_save_option_commodity_min(engine, client=DATABASE):
'''
:param engine:
:param client:
:return:
'''
engine = select_save_engine(engine)
engine.QA_SU_save_option_commodity_min(client=client)
def QA_SU_save_option_commodity_day(engine, client=DATABASE):
'''
:param engine:
:param client:
:return:
'''
engine = select_save_engine(engine)
engine.QA_SU_save_option_commodity_day(client=client)
def QA_SU_save_stock_min(engine, client=DATABASE):
"""save stock_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_min(client=client)
def QA_SU_save_index_day(engine, client=DATABASE):
"""save index_day
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_index_day(client=client)
def QA_SU_save_index_min(engine, client=DATABASE):
"""save index_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_index_min(client=client)
def QA_SU_save_etf_day(engine, client=DATABASE):
"""save etf_day
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_etf_day(client=client)
def QA_SU_save_etf_min(engine, client=DATABASE):
"""save etf_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_etf_min(client=client)
def QA_SU_save_stock_xdxr(engine, client=DATABASE):
"""save stock_xdxr
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_xdxr(client=client)
def QA_SU_save_stock_block(engine, client=DATABASE):
"""save stock_block
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_block(client=client)
def select_save_engine(engine):
'''
    Select the save engine: 'tushare'/'ts'/'Tushare' use the free Tushare data API, 'tdx' uses the TongDaXin (通达信) data API.
    :param engine: engine name as a string (str)
:return: sts means save_tushare_py or stdx means save_tdx_py
'''
if engine in ['tushare', 'ts', 'Tushare']:
return sts
elif engine in ['tdx']:
return stdx
elif engine in ['gm', 'goldenminer']:
return sgm
elif engine in ['jq', 'joinquant']:
return sjq
else:
        print('QA Error QASU.main.py call select_save_engine with parameter %s is none of tushare, ts, Tushare, tdx, gm, or jq' % engine)
def QA_SU_save_stock_min_5(file_dir, client=DATABASE):
"""save stock_min5
Arguments:
file_dir {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
"""
return tdx_file.QA_save_tdx_to_mongo(file_dir, client)
def QA_SU_crawl_eastmoney(action="zjlx", stockCode=None):
'''
    :param action: "zjlx" (capital flow); other action types may be supported later
    :param stockCode: stock code
:return:
'''
stockItems = QA_fetch_stock_list()
if stockCode == "all":
        # read the stock codes from the tushare stock list
print("💪 一共需要获取 %d 个股票的 资金流向 , 需要大概 %d 小时" %
(len(stockItems), (len(stockItems)*5)/60/60))
code_list = []
for stock in stockItems:
code_list.append(stock['code'])
# print(stock['code'])
crawl_eastmoney_file.QA_read_eastmoney_zjlx_web_page_to_sqllite(
code_list)
# print(stock)
return
else:
        # TODO: check whether the stock code is valid
# return crawl_eastmoney_file.QA_read_eastmoney_zjlx_web_page_to_sqllite(stockCode=stockCode)
code_list = []
code_list.append(stockCode)
return crawl_eastmoney_file.QA_request_eastmoney_zjlx(param_stock_code_list=code_list)
def QA_SU_save_financialfiles():
return save_financialfiles.QA_SU_save_financial_files()
def QA_SU_save_report_calendar_day():
return save_financial_calendar.QA_SU_save_report_calendar_day()
def QA_SU_save_report_calendar_his():
return save_financial_calendar.QA_SU_save_report_calendar_his()
def QA_SU_save_stock_divyield_day():
return save_stock_divyield.QA_SU_save_stock_divyield_day()
def QA_SU_save_stock_divyield_his():
return save_stock_divyield.QA_SU_save_stock_divyield_his()
|
py | 1a38590df57862bf7c94ea473a825e484a5ec08a | # WxPython Demo
from typing import List, Optional
import os.path
import wx
import QRCodeLib.qrcodelib as qr
from QRCodeLib.qrcodelib import Symbols
class FormMain(wx.Frame):
def __init__(self, **kw) -> None:
super().__init__(**kw)
self._init_widgets()
self._images: List[wx.Bitmap] = []
self._module_size = int()
def _init_widgets(self) -> None:
# self
self.Title = "QR Code"
self.SetSize(700, 550)
self.SetMinSize(self.GetSize())
font = wx.Font(
10,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL
)
self.SetFont(font)
# create panel
self._pnl_top = self._create_top_panel()
self._pnl_middle = self._create_middle_panel()
self._pnl_bottom = self._create_bottom_panel()
# sizer
sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
sizer.Add(self._pnl_top, proportion=1, flag=wx.EXPAND)
sizer.Add(self._pnl_middle, flag=wx.EXPAND)
sizer.Add(self._pnl_bottom, flag=wx.EXPAND)
def _create_top_panel(self) -> wx.Panel:
panel = wx.Panel(self)
return panel
def _create_middle_panel(self) -> wx.Panel:
panel = wx.Panel(self, size=(self.GetSize().Width, 120))
# lbl_data
lbl_data = wx.StaticText(panel, label='Data :')
# txt_data
self._txt_data = wx.TextCtrl(
panel,
style=wx.TE_MULTILINE | wx.TE_PROCESS_TAB
)
self._txt_data.SetFocus()
self._txt_data.Bind(wx.EVT_TEXT, self.update_image)
# sizer
sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(sizer)
sizer.Add(
lbl_data,
flag=wx.TOP | wx.LEFT,
border=10
)
sizer.Add(
self._txt_data,
proportion=1,
flag=wx.EXPAND | wx.BOTTOM | wx.LEFT | wx.RIGHT,
border=10
)
return panel
def _create_bottom_panel(self) -> wx.Panel:
panel = wx.Panel(
self,
size=(self.GetSize().width, 70)
)
# lbl_ec_levell
wx.StaticText(panel, label="Error Correction Level :", pos=(10, 9), size=(143, 21))
# cmb_ec_level
self._cmb_ec_level = wx.ComboBox(
panel,
pos=(160, 5),
size=(48, 21),
choices=["L", "M", "Q", "H"],
style=wx.CB_READONLY
)
self._cmb_ec_level.SetValue("M")
self._cmb_ec_level.Bind(wx.EVT_COMBOBOX, self.update_image)
# lbl_byte_enc
self._lbl_byte_enc = wx.StaticText(
panel,
label="Byte mode Encoding :",
pos=(225, 8)
)
# cmb_byte_enc
self._cmb_byte_enc = wx.ComboBox(
panel,
pos=(358, 5),
size=(315, 21),
choices=["Shift_JIS", "UTF-8"],
style=wx.CB_READONLY
)
self._cmb_byte_enc.SetValue("Shift_JIS")
self._cmb_byte_enc.Bind(wx.EVT_COMBOBOX, self.update_image)
# lbl_max_ver
wx.StaticText(panel, label="Max Version :", pos=(10, 39))
# cmb_max_ver
self._cmb_max_ver = wx.ComboBox(
panel,
pos=(160, 35),
size=(48, 21),
choices=[str(item + 1) for item in range(40)],
style=wx.CB_READONLY
)
self._cmb_max_ver.SetValue(str(40))
self._cmb_max_ver.Bind(wx.EVT_COMBOBOX, self.update_image)
# chk_structured_append
self._chk_structured_append = wx.CheckBox(
panel,
label="Structured Append",
pos=(225, 39)
)
self._chk_structured_append.SetValue(False)
self._chk_structured_append.Bind(wx.EVT_CHECKBOX, self.update_image)
# lbl_module_size
wx.StaticText(panel, label="Module Size :", pos=(380, 39))
# spn_module_size
self._spn_module_size = wx.SpinCtrlDouble(
panel,
pos=(460, 35),
size=(48, 21),
min=1,
max=100,
initial=5
)
self._spn_module_size.Bind(wx.EVT_SPINCTRLDOUBLE, self.update_image)
# btn_save
self._btn_save = wx.Button(
panel,
label="Save",
pos=(553, 35),
size=(120, 23)
)
self._btn_save.Bind(wx.EVT_BUTTON, self.on_btn_save_clicked)
return panel
def create_symbols(self) -> Optional[Symbols]:
data = self._txt_data.GetValue()
if not data:
return None
ec_level = qr.ErrorCorrectionLevel.to_int(self._cmb_ec_level.GetValue())
max_ver = int(self._cmb_max_ver.GetValue())
structured_append = self._chk_structured_append.GetValue()
enc_mode = self._cmb_byte_enc.GetValue()
symbols = qr.Symbols(ec_level, max_ver, structured_append, enc_mode)
try:
symbols.append_text(self._txt_data.GetValue())
except Exception as e:
wx.MessageBox(str(e), parent=self)
return None
return symbols
def update_image(self, event) -> None:
self._pnl_top.DestroyChildren()
symbols = self.create_symbols()
if not symbols:
return
self._images.clear()
sizer = wx.BoxSizer(wx.HORIZONTAL)
self._pnl_top.SetSizer(sizer)
self._pnl_top.Freeze()
module_size = int(self._spn_module_size.GetValue())
for symbol in symbols:
(data, width, height) = symbol.get_rgb_bytes(module_size)
bitmap = wx.Bitmap.FromBuffer(width, height, data)
self._images.append(bitmap)
for image in self._images:
static_bitmap = wx.StaticBitmap(self._pnl_top, bitmap=image)
sizer.Add(static_bitmap, flag=wx.ALL, border=2)
self._pnl_top.Layout()
self._pnl_top.Thaw()
def on_btn_save_clicked(self, event) -> None:
symbols = self.create_symbols()
if not symbols:
return
wildcard = (
"Monochrome Bitmap (*.bmp)|*.bmp|"
"24-bit Bitmap (*.bmp)|*.bmp|"
"Portable Pixmap (*.ppm)|*.ppm|"
"X11 Bitmap (*.xbm)|*.xbm|"
"SVG (*.svg)|*.svg"
)
dlg = wx.FileDialog(self, wildcard=wildcard,
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_CANCEL:
return
(root, ext) = os.path.splitext(dlg.GetPath())
module_size = int(self._spn_module_size.GetValue())
for i, symbol in enumerate(symbols):
if symbols.count == 1:
path = root
else:
path = root + "_" + str(i)
if dlg.FilterIndex == 0:
path += ".bmp"
symbol.save_bitmap(path, module_size, True)
if dlg.FilterIndex == 1:
path += ".bmp"
symbol.save_bitmap(path, module_size, False)
if dlg.FilterIndex == 2:
path += ".ppm"
symbol.save_ppm(path, module_size)
if dlg.FilterIndex == 3:
path += ".xbm"
symbol.save_xbm(path, module_size)
if dlg.FilterIndex == 4:
path += ".svg"
symbol.save_svg(path, module_size)
dlg.Destroy()
def main() -> None:
app = wx.App()
form = FormMain(parent=None)
form.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
py | 1a385acd8f13a5c7514301a687ed7e4dfd90ae50 | #!/usr/bin/env python
# $Id$
"""9 solutions"""
import puzzler
from puzzler.puzzles.solid_pentominoes import SolidPentominoes3x3x10Steps
puzzler.run(SolidPentominoes3x3x10Steps)
|
py | 1a385acf058d7c8902e6d45a5ac660ccee14a741 | # MIT License
# Copyright (c) 2020 Mitchell Lane
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
class Config:
def __init__(self, fileName, contents, configType):
self.fileName = fileName
self.contents = contents
self.configType = configType
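# The bundle file is expected to look roughly like this (section names below are hypothetical):
#
#   [print:0.20mm Fine]
#   layer_height = 0.2
#   [printer:My Printer]
#   bed_shape = 0x0,250x0,250x210,0x210
#   [presets]
#   ...
# Everything before the first "[...]" header is skipped, and parsing stops at the "[presets]" section.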
with open("config_bundle.ini", "r") as bundleContents:
line = bundleContents.readline()
while line and not line.startswith("["):
line = bundleContents.readline()
configurationsFound = []
while line:
rawConfigHeader = line[1:-2]
if rawConfigHeader == "presets":
break
print(line)
configHeaderComponents = rawConfigHeader.split(":", 1)
configType = configHeaderComponents[0]
fileName = (configHeaderComponents[1] + ".ini").replace(" ", "_")
print("Found config section: " + configHeaderComponents[1])
line = bundleContents.readline()
contents=[]
while line and not line.startswith("["):
contents.append(line)
line = bundleContents.readline()
configurationsFound.append(Config(fileName, contents, configType))
print("//////////////////////////////////////////")
print("-----------------------------------\n" + "Found: " + str(len(configurationsFound)) + " configurations in total")
outputDir = "config_files"
for configuration in configurationsFound:
outputFileName = os.path.join(outputDir, configuration.fileName)
print("Writing configuration to '" + outputFileName + "'")
with open(outputFileName, 'w') as f:
for configLine in configuration.contents:
if configLine.rstrip():
f.write(configLine)
print("All configuration written to seperate files")
|
py | 1a385af71ebec1ef61f9f4d86bc2b29085516ee3 | a = int(input("Enter first number\n"))
b = int(input("Enter second number\n"))
a, b = b, a
print("a = {}\nb = {}".format(a, b)) |
py | 1a385b8c0ae76dc342f35c59135126b97812b4de | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ChequeTablePay(Document):
pass
|
py | 1a385c07e03052c9e474fcd131c3f15ca86af15c | #/*
# *
# * TuneIn Radio for Kodi.
# *
# * Copyright (C) 2013 Diego Fernando Nieto
# *
# * This program is free software: you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation, either version 3 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# */
from random import choice as choise
import urllib.request, urllib.error, urllib.parse
import xml.dom.minidom as minidom
class StreamTheWorld:
## Example XML document we are parsing follows, as the minidom code is so beautiful to follow
# http://playerservices.streamtheworld.com/api/livestream?version=1.4&mount=CARACOL_RADIOAAC&lang=EN
#
#<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
#<live_stream_config version="1.4">
# <mountpoints>
# <mountpoint>
# <status>
# <status-code>200</status-code>
# <status-message>OK</status-message>
# </status>
#
# <transports>
# <transport>http</transport>
# </transports>
#
# <servers>
# <server sid="3653">
# <ip>3653.live.streamtheworld.com</ip>
# <ports>
# <port type="http">80</port>
# <port type="http">3690</port>
# <port type="http">443</port>
# </ports>
# </server>
#
# <server sid="1351">
# <ip>1351.live.streamtheworld.com</ip>
# <ports>
# <port type="http">80</port>
# <port type="http">3690</port>
# <port type="http">443</port>
# </ports>
# </server>
# </servers>
#
# <mount>CARACOL_RADIOAAC</mount>
# <format>FLV</format>
# <bitrate>32000</bitrate>
# <media-format container="flv" cuepoints="andoxml">
# <audio index="0" samplerate="44100" codec="heaacv2" bitrate="32000" channels="2"/>
# </media-format>
# <authentication>0</authentication>
# <timeout>0</timeout>
# </mountpoint>
# </mountpoints>
#</live_stream_config>
''' Parse streamtheworld URL to HTTP Stream'''
def __init__(self, cs):
self.__cs__ = cs
return
def __validate_callsign(self, cs, acc=True):
'''
Normal callsign format is 'WWWWAAA', where 'WWWW' is the radio station
callsign and 'AAA' is always 'AAC'.
'''
if not cs or not isinstance(cs, str):
raise ValueError('callsign \'%s\' is not a string.' % cs)
if len(cs) < 6:
raise ValueError('callsign \'%s\' is too short.' % cs)
if acc and not cs.endswith('AAC'):
cs = cs + 'AAC'
return cs
def __make_request(self, callsign):
''' Make a Call to StreamTheWorld API v1.5'''
host = 'playerservices.streamtheworld.com'
req = urllib.request.Request(
'http://%s/api/livestream?version=1.5&mount=%s&lang=en' %
(host, callsign))
req.add_header('User-Agent', 'Mozilla/5.0')
return req
def __t(self, element):
'''get the text of a DOM element'''
return element.firstChild.data
def __check_status(self, ele):
''' should only be one status element inside a mountpoint'''
status = ele.getElementsByTagName('status')[0]
if self.__t(status.getElementsByTagName('status-code')[0]) != '200':
msg = self.__t(status.getElementsByTagName('status-message')[0])
raise Exception('Error locating stream: ' + msg)
def __create_stream_urls(self, srcfile):
''' Return an array with all URLs'''
doc = minidom.parse(srcfile)
mp = doc.getElementsByTagName('mountpoint')[0]
self.__check_status(mp)
mt = self.__t(mp.getElementsByTagName('mount')[0])
allurls = []
for s in mp.getElementsByTagName('server'):
# a thing of beauty, right?
ip = self.__t(s.getElementsByTagName('ip')[0])
ports = [self.__t(p) for p in s.getElementsByTagName('port')]
# yes, it is always HTTP. We see ports 80, 443, and 3690 usually
urls = ['http://%s:%s/%s' % (ip, p, mt) for p in ports]
allurls.extend(urls)
return allurls
def get_stream_url(self, cs):
''' Get one URL from CS'''
try:
callsign = self.__validate_callsign(cs)
req = self.__make_request(callsign)
result = urllib.request.urlopen(req)
urls = self.__create_stream_urls(result)
except:
callsign = self.__validate_callsign(cs, False)
req = self.__make_request(callsign)
result = urllib.request.urlopen(req)
urls = self.__create_stream_urls(result)
if len(urls) > 0:
u = choise(urls)
if not u.endswith('_SC'):
u = u + '_SC'
return u
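# Usage sketch (the callsign below is hypothetical; 'AAC' is appended automatically when missing):
#
#     stw = StreamTheWorld('CARACOL_RADIO')
#     url = stw.get_stream_url('CARACOL_RADIO')
#     # -> e.g. 'http://3653.live.streamtheworld.com:80/CARACOL_RADIOAAC_SC'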
|
py | 1a385c9ce97e19a995937e60b11ff57f507ba48d |
from plotly.graph_objs import Layout
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Data(_BaseLayoutHierarchyType):
# area
# ----
@property
def area(self):
"""
The 'area' property is a tuple of instances of
Area that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Area
- A list or tuple of dicts of string/value properties that
will be passed to the Area constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Area]
"""
return self['area']
@area.setter
def area(self, val):
self['area'] = val
# barpolar
# --------
@property
def barpolar(self):
"""
The 'barpolar' property is a tuple of instances of
Barpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Barpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Barpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Barpolar]
"""
return self['barpolar']
@barpolar.setter
def barpolar(self, val):
self['barpolar'] = val
# bar
# ---
@property
def bar(self):
"""
The 'bar' property is a tuple of instances of
Bar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Bar
- A list or tuple of dicts of string/value properties that
will be passed to the Bar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Bar]
"""
return self['bar']
@bar.setter
def bar(self, val):
self['bar'] = val
# box
# ---
@property
def box(self):
"""
The 'box' property is a tuple of instances of
Box that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Box
- A list or tuple of dicts of string/value properties that
will be passed to the Box constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Box]
"""
return self['box']
@box.setter
def box(self, val):
self['box'] = val
# candlestick
# -----------
@property
def candlestick(self):
"""
The 'candlestick' property is a tuple of instances of
Candlestick that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Candlestick
- A list or tuple of dicts of string/value properties that
will be passed to the Candlestick constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Candlestick]
"""
return self['candlestick']
@candlestick.setter
def candlestick(self, val):
self['candlestick'] = val
# carpet
# ------
@property
def carpet(self):
"""
The 'carpet' property is a tuple of instances of
Carpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Carpet
- A list or tuple of dicts of string/value properties that
will be passed to the Carpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Carpet]
"""
return self['carpet']
@carpet.setter
def carpet(self, val):
self['carpet'] = val
# choropleth
# ----------
@property
def choropleth(self):
"""
The 'choropleth' property is a tuple of instances of
Choropleth that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Choropleth
- A list or tuple of dicts of string/value properties that
will be passed to the Choropleth constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Choropleth]
"""
return self['choropleth']
@choropleth.setter
def choropleth(self, val):
self['choropleth'] = val
# cone
# ----
@property
def cone(self):
"""
The 'cone' property is a tuple of instances of
Cone that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Cone
- A list or tuple of dicts of string/value properties that
will be passed to the Cone constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Cone]
"""
return self['cone']
@cone.setter
def cone(self, val):
self['cone'] = val
# contourcarpet
# -------------
@property
def contourcarpet(self):
"""
The 'contourcarpet' property is a tuple of instances of
Contourcarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contourcarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Contourcarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contourcarpet]
"""
return self['contourcarpet']
@contourcarpet.setter
def contourcarpet(self, val):
self['contourcarpet'] = val
# contour
# -------
@property
def contour(self):
"""
The 'contour' property is a tuple of instances of
Contour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contour
- A list or tuple of dicts of string/value properties that
will be passed to the Contour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contour]
"""
return self['contour']
@contour.setter
def contour(self, val):
self['contour'] = val
# heatmapgl
# ---------
@property
def heatmapgl(self):
"""
The 'heatmapgl' property is a tuple of instances of
Heatmapgl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmapgl
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmapgl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmapgl]
"""
return self['heatmapgl']
@heatmapgl.setter
def heatmapgl(self, val):
self['heatmapgl'] = val
# heatmap
# -------
@property
def heatmap(self):
"""
The 'heatmap' property is a tuple of instances of
Heatmap that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmap
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmap constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmap]
"""
return self['heatmap']
@heatmap.setter
def heatmap(self, val):
self['heatmap'] = val
# histogram2dcontour
# ------------------
@property
def histogram2dcontour(self):
"""
The 'histogram2dcontour' property is a tuple of instances of
Histogram2dContour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2dContour
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2dContour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2dContour]
"""
return self['histogram2dcontour']
@histogram2dcontour.setter
def histogram2dcontour(self, val):
self['histogram2dcontour'] = val
# histogram2d
# -----------
@property
def histogram2d(self):
"""
The 'histogram2d' property is a tuple of instances of
Histogram2d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2d
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2d]
"""
return self['histogram2d']
@histogram2d.setter
def histogram2d(self, val):
self['histogram2d'] = val
# histogram
# ---------
@property
def histogram(self):
"""
The 'histogram' property is a tuple of instances of
Histogram that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram]
"""
return self['histogram']
@histogram.setter
def histogram(self, val):
self['histogram'] = val
# isosurface
# ----------
@property
def isosurface(self):
"""
The 'isosurface' property is a tuple of instances of
Isosurface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Isosurface
- A list or tuple of dicts of string/value properties that
will be passed to the Isosurface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Isosurface]
"""
return self['isosurface']
@isosurface.setter
def isosurface(self, val):
self['isosurface'] = val
# mesh3d
# ------
@property
def mesh3d(self):
"""
The 'mesh3d' property is a tuple of instances of
Mesh3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Mesh3d
- A list or tuple of dicts of string/value properties that
will be passed to the Mesh3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Mesh3d]
"""
return self['mesh3d']
@mesh3d.setter
def mesh3d(self, val):
self['mesh3d'] = val
# ohlc
# ----
@property
def ohlc(self):
"""
The 'ohlc' property is a tuple of instances of
Ohlc that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Ohlc
- A list or tuple of dicts of string/value properties that
will be passed to the Ohlc constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Ohlc]
"""
return self['ohlc']
@ohlc.setter
def ohlc(self, val):
self['ohlc'] = val
# parcats
# -------
@property
def parcats(self):
"""
The 'parcats' property is a tuple of instances of
Parcats that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcats
- A list or tuple of dicts of string/value properties that
will be passed to the Parcats constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcats]
"""
return self['parcats']
@parcats.setter
def parcats(self, val):
self['parcats'] = val
# parcoords
# ---------
@property
def parcoords(self):
"""
The 'parcoords' property is a tuple of instances of
Parcoords that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcoords
- A list or tuple of dicts of string/value properties that
will be passed to the Parcoords constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcoords]
"""
return self['parcoords']
@parcoords.setter
def parcoords(self, val):
self['parcoords'] = val
# pie
# ---
@property
def pie(self):
"""
The 'pie' property is a tuple of instances of
Pie that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pie
- A list or tuple of dicts of string/value properties that
will be passed to the Pie constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pie]
"""
return self['pie']
@pie.setter
def pie(self, val):
self['pie'] = val
# pointcloud
# ----------
@property
def pointcloud(self):
"""
The 'pointcloud' property is a tuple of instances of
Pointcloud that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pointcloud
- A list or tuple of dicts of string/value properties that
will be passed to the Pointcloud constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pointcloud]
"""
return self['pointcloud']
@pointcloud.setter
def pointcloud(self, val):
self['pointcloud'] = val
# sankey
# ------
@property
def sankey(self):
"""
The 'sankey' property is a tuple of instances of
Sankey that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sankey
- A list or tuple of dicts of string/value properties that
will be passed to the Sankey constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sankey]
"""
return self['sankey']
@sankey.setter
def sankey(self, val):
self['sankey'] = val
# scatter3d
# ---------
@property
def scatter3d(self):
"""
The 'scatter3d' property is a tuple of instances of
Scatter3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter3d
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter3d]
"""
return self['scatter3d']
@scatter3d.setter
def scatter3d(self, val):
self['scatter3d'] = val
# scattercarpet
# -------------
@property
def scattercarpet(self):
"""
The 'scattercarpet' property is a tuple of instances of
Scattercarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattercarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Scattercarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattercarpet]
"""
return self['scattercarpet']
@scattercarpet.setter
def scattercarpet(self, val):
self['scattercarpet'] = val
# scattergeo
# ----------
@property
def scattergeo(self):
"""
The 'scattergeo' property is a tuple of instances of
Scattergeo that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergeo
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergeo constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergeo]
"""
return self['scattergeo']
@scattergeo.setter
def scattergeo(self, val):
self['scattergeo'] = val
# scattergl
# ---------
@property
def scattergl(self):
"""
The 'scattergl' property is a tuple of instances of
Scattergl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergl
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergl]
"""
return self['scattergl']
@scattergl.setter
def scattergl(self, val):
self['scattergl'] = val
# scattermapbox
# -------------
@property
def scattermapbox(self):
"""
The 'scattermapbox' property is a tuple of instances of
Scattermapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattermapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Scattermapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattermapbox]
"""
return self['scattermapbox']
@scattermapbox.setter
def scattermapbox(self, val):
self['scattermapbox'] = val
# scatterpolargl
# --------------
@property
def scatterpolargl(self):
"""
The 'scatterpolargl' property is a tuple of instances of
Scatterpolargl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolargl
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolargl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolargl]
"""
return self['scatterpolargl']
@scatterpolargl.setter
def scatterpolargl(self, val):
self['scatterpolargl'] = val
# scatterpolar
# ------------
@property
def scatterpolar(self):
"""
The 'scatterpolar' property is a tuple of instances of
Scatterpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolar]
"""
return self['scatterpolar']
@scatterpolar.setter
def scatterpolar(self, val):
self['scatterpolar'] = val
# scatter
# -------
@property
def scatter(self):
"""
The 'scatter' property is a tuple of instances of
Scatter that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter]
"""
return self['scatter']
@scatter.setter
def scatter(self, val):
self['scatter'] = val
# scatterternary
# --------------
@property
def scatterternary(self):
"""
The 'scatterternary' property is a tuple of instances of
Scatterternary that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterternary
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterternary constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterternary]
"""
return self['scatterternary']
@scatterternary.setter
def scatterternary(self, val):
self['scatterternary'] = val
# splom
# -----
@property
def splom(self):
"""
The 'splom' property is a tuple of instances of
Splom that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Splom
- A list or tuple of dicts of string/value properties that
will be passed to the Splom constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Splom]
"""
return self['splom']
@splom.setter
def splom(self, val):
self['splom'] = val
# streamtube
# ----------
@property
def streamtube(self):
"""
The 'streamtube' property is a tuple of instances of
Streamtube that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Streamtube
- A list or tuple of dicts of string/value properties that
will be passed to the Streamtube constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Streamtube]
"""
return self['streamtube']
@streamtube.setter
def streamtube(self, val):
self['streamtube'] = val
# sunburst
# --------
@property
def sunburst(self):
"""
The 'sunburst' property is a tuple of instances of
Sunburst that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sunburst
- A list or tuple of dicts of string/value properties that
will be passed to the Sunburst constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sunburst]
"""
return self['sunburst']
@sunburst.setter
def sunburst(self, val):
self['sunburst'] = val
# surface
# -------
@property
def surface(self):
"""
The 'surface' property is a tuple of instances of
Surface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Surface
- A list or tuple of dicts of string/value properties that
will be passed to the Surface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Surface]
"""
return self['surface']
@surface.setter
def surface(self, val):
self['surface'] = val
# table
# -----
@property
def table(self):
"""
The 'table' property is a tuple of instances of
Table that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Table
- A list or tuple of dicts of string/value properties that
will be passed to the Table constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Table]
"""
return self['table']
@table.setter
def table(self, val):
self['table'] = val
# violin
# ------
@property
def violin(self):
"""
The 'violin' property is a tuple of instances of
Violin that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Violin
- A list or tuple of dicts of string/value properties that
will be passed to the Violin constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Violin]
"""
return self['violin']
@violin.setter
def violin(self, val):
self['violin'] = val
# volume
# ------
@property
def volume(self):
"""
The 'volume' property is a tuple of instances of
Volume that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Volume
- A list or tuple of dicts of string/value properties that
will be passed to the Volume constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Volume]
"""
return self['volume']
@volume.setter
def volume(self, val):
self['volume'] = val
# waterfall
# ---------
@property
def waterfall(self):
"""
The 'waterfall' property is a tuple of instances of
Waterfall that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Waterfall
- A list or tuple of dicts of string/value properties that
will be passed to the Waterfall constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Waterfall]
"""
return self['waterfall']
@waterfall.setter
def waterfall(self, val):
self['waterfall'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'layout.template'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
area
plotly.graph_objs.layout.template.data.Area instance or
dict with compatible properties
barpolar
plotly.graph_objs.layout.template.data.Barpolar
instance or dict with compatible properties
bar
plotly.graph_objs.layout.template.data.Bar instance or
dict with compatible properties
box
plotly.graph_objs.layout.template.data.Box instance or
dict with compatible properties
candlestick
plotly.graph_objs.layout.template.data.Candlestick
instance or dict with compatible properties
carpet
plotly.graph_objs.layout.template.data.Carpet instance
or dict with compatible properties
choropleth
plotly.graph_objs.layout.template.data.Choropleth
instance or dict with compatible properties
cone
plotly.graph_objs.layout.template.data.Cone instance or
dict with compatible properties
contourcarpet
plotly.graph_objs.layout.template.data.Contourcarpet
instance or dict with compatible properties
contour
plotly.graph_objs.layout.template.data.Contour instance
or dict with compatible properties
heatmapgl
plotly.graph_objs.layout.template.data.Heatmapgl
instance or dict with compatible properties
heatmap
plotly.graph_objs.layout.template.data.Heatmap instance
or dict with compatible properties
histogram2dcontour
plotly.graph_objs.layout.template.data.Histogram2dConto
ur instance or dict with compatible properties
histogram2d
plotly.graph_objs.layout.template.data.Histogram2d
instance or dict with compatible properties
histogram
plotly.graph_objs.layout.template.data.Histogram
instance or dict with compatible properties
isosurface
plotly.graph_objs.layout.template.data.Isosurface
instance or dict with compatible properties
mesh3d
plotly.graph_objs.layout.template.data.Mesh3d instance
or dict with compatible properties
ohlc
plotly.graph_objs.layout.template.data.Ohlc instance or
dict with compatible properties
parcats
plotly.graph_objs.layout.template.data.Parcats instance
or dict with compatible properties
parcoords
plotly.graph_objs.layout.template.data.Parcoords
instance or dict with compatible properties
pie
plotly.graph_objs.layout.template.data.Pie instance or
dict with compatible properties
pointcloud
plotly.graph_objs.layout.template.data.Pointcloud
instance or dict with compatible properties
sankey
plotly.graph_objs.layout.template.data.Sankey instance
or dict with compatible properties
scatter3d
plotly.graph_objs.layout.template.data.Scatter3d
instance or dict with compatible properties
scattercarpet
plotly.graph_objs.layout.template.data.Scattercarpet
instance or dict with compatible properties
scattergeo
plotly.graph_objs.layout.template.data.Scattergeo
instance or dict with compatible properties
scattergl
plotly.graph_objs.layout.template.data.Scattergl
instance or dict with compatible properties
scattermapbox
plotly.graph_objs.layout.template.data.Scattermapbox
instance or dict with compatible properties
scatterpolargl
plotly.graph_objs.layout.template.data.Scatterpolargl
instance or dict with compatible properties
scatterpolar
plotly.graph_objs.layout.template.data.Scatterpolar
instance or dict with compatible properties
scatter
plotly.graph_objs.layout.template.data.Scatter instance
or dict with compatible properties
scatterternary
plotly.graph_objs.layout.template.data.Scatterternary
instance or dict with compatible properties
splom
plotly.graph_objs.layout.template.data.Splom instance
or dict with compatible properties
streamtube
plotly.graph_objs.layout.template.data.Streamtube
instance or dict with compatible properties
sunburst
plotly.graph_objs.layout.template.data.Sunburst
instance or dict with compatible properties
surface
plotly.graph_objs.layout.template.data.Surface instance
or dict with compatible properties
table
plotly.graph_objs.layout.template.data.Table instance
or dict with compatible properties
violin
plotly.graph_objs.layout.template.data.Violin instance
or dict with compatible properties
volume
plotly.graph_objs.layout.template.data.Volume instance
or dict with compatible properties
waterfall
plotly.graph_objs.layout.template.data.Waterfall
instance or dict with compatible properties
"""
def __init__(
self,
arg=None,
area=None,
barpolar=None,
bar=None,
box=None,
candlestick=None,
carpet=None,
choropleth=None,
cone=None,
contourcarpet=None,
contour=None,
heatmapgl=None,
heatmap=None,
histogram2dcontour=None,
histogram2d=None,
histogram=None,
isosurface=None,
mesh3d=None,
ohlc=None,
parcats=None,
parcoords=None,
pie=None,
pointcloud=None,
sankey=None,
scatter3d=None,
scattercarpet=None,
scattergeo=None,
scattergl=None,
scattermapbox=None,
scatterpolargl=None,
scatterpolar=None,
scatter=None,
scatterternary=None,
splom=None,
streamtube=None,
sunburst=None,
surface=None,
table=None,
violin=None,
volume=None,
waterfall=None,
**kwargs
):
"""
Construct a new Data object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.layout.template.Data
area
plotly.graph_objs.layout.template.data.Area instance or
dict with compatible properties
barpolar
plotly.graph_objs.layout.template.data.Barpolar
instance or dict with compatible properties
bar
plotly.graph_objs.layout.template.data.Bar instance or
dict with compatible properties
box
plotly.graph_objs.layout.template.data.Box instance or
dict with compatible properties
candlestick
plotly.graph_objs.layout.template.data.Candlestick
instance or dict with compatible properties
carpet
plotly.graph_objs.layout.template.data.Carpet instance
or dict with compatible properties
choropleth
plotly.graph_objs.layout.template.data.Choropleth
instance or dict with compatible properties
cone
plotly.graph_objs.layout.template.data.Cone instance or
dict with compatible properties
contourcarpet
plotly.graph_objs.layout.template.data.Contourcarpet
instance or dict with compatible properties
contour
plotly.graph_objs.layout.template.data.Contour instance
or dict with compatible properties
heatmapgl
plotly.graph_objs.layout.template.data.Heatmapgl
instance or dict with compatible properties
heatmap
plotly.graph_objs.layout.template.data.Heatmap instance
or dict with compatible properties
histogram2dcontour
plotly.graph_objs.layout.template.data.Histogram2dConto
ur instance or dict with compatible properties
histogram2d
plotly.graph_objs.layout.template.data.Histogram2d
instance or dict with compatible properties
histogram
plotly.graph_objs.layout.template.data.Histogram
instance or dict with compatible properties
isosurface
plotly.graph_objs.layout.template.data.Isosurface
instance or dict with compatible properties
mesh3d
plotly.graph_objs.layout.template.data.Mesh3d instance
or dict with compatible properties
ohlc
plotly.graph_objs.layout.template.data.Ohlc instance or
dict with compatible properties
parcats
plotly.graph_objs.layout.template.data.Parcats instance
or dict with compatible properties
parcoords
plotly.graph_objs.layout.template.data.Parcoords
instance or dict with compatible properties
pie
plotly.graph_objs.layout.template.data.Pie instance or
dict with compatible properties
pointcloud
plotly.graph_objs.layout.template.data.Pointcloud
instance or dict with compatible properties
sankey
plotly.graph_objs.layout.template.data.Sankey instance
or dict with compatible properties
scatter3d
plotly.graph_objs.layout.template.data.Scatter3d
instance or dict with compatible properties
scattercarpet
plotly.graph_objs.layout.template.data.Scattercarpet
instance or dict with compatible properties
scattergeo
plotly.graph_objs.layout.template.data.Scattergeo
instance or dict with compatible properties
scattergl
plotly.graph_objs.layout.template.data.Scattergl
instance or dict with compatible properties
scattermapbox
plotly.graph_objs.layout.template.data.Scattermapbox
instance or dict with compatible properties
scatterpolargl
plotly.graph_objs.layout.template.data.Scatterpolargl
instance or dict with compatible properties
scatterpolar
plotly.graph_objs.layout.template.data.Scatterpolar
instance or dict with compatible properties
scatter
plotly.graph_objs.layout.template.data.Scatter instance
or dict with compatible properties
scatterternary
plotly.graph_objs.layout.template.data.Scatterternary
instance or dict with compatible properties
splom
plotly.graph_objs.layout.template.data.Splom instance
or dict with compatible properties
streamtube
plotly.graph_objs.layout.template.data.Streamtube
instance or dict with compatible properties
sunburst
plotly.graph_objs.layout.template.data.Sunburst
instance or dict with compatible properties
surface
plotly.graph_objs.layout.template.data.Surface instance
or dict with compatible properties
table
plotly.graph_objs.layout.template.data.Table instance
or dict with compatible properties
violin
plotly.graph_objs.layout.template.data.Violin instance
or dict with compatible properties
volume
plotly.graph_objs.layout.template.data.Volume instance
or dict with compatible properties
waterfall
plotly.graph_objs.layout.template.data.Waterfall
instance or dict with compatible properties
Returns
-------
Data
"""
super(Data, self).__init__('data')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.template.Data
constructor must be a dict or
an instance of plotly.graph_objs.layout.template.Data"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.layout.template import (data as v_data)
# Initialize validators
# ---------------------
self._validators['area'] = v_data.AreasValidator()
self._validators['barpolar'] = v_data.BarpolarsValidator()
self._validators['bar'] = v_data.BarsValidator()
self._validators['box'] = v_data.BoxsValidator()
self._validators['candlestick'] = v_data.CandlesticksValidator()
self._validators['carpet'] = v_data.CarpetsValidator()
self._validators['choropleth'] = v_data.ChoroplethsValidator()
self._validators['cone'] = v_data.ConesValidator()
self._validators['contourcarpet'] = v_data.ContourcarpetsValidator()
self._validators['contour'] = v_data.ContoursValidator()
self._validators['heatmapgl'] = v_data.HeatmapglsValidator()
self._validators['heatmap'] = v_data.HeatmapsValidator()
self._validators['histogram2dcontour'
] = v_data.Histogram2dContoursValidator()
self._validators['histogram2d'] = v_data.Histogram2dsValidator()
self._validators['histogram'] = v_data.HistogramsValidator()
self._validators['isosurface'] = v_data.IsosurfacesValidator()
self._validators['mesh3d'] = v_data.Mesh3dsValidator()
self._validators['ohlc'] = v_data.OhlcsValidator()
self._validators['parcats'] = v_data.ParcatssValidator()
self._validators['parcoords'] = v_data.ParcoordssValidator()
self._validators['pie'] = v_data.PiesValidator()
self._validators['pointcloud'] = v_data.PointcloudsValidator()
self._validators['sankey'] = v_data.SankeysValidator()
self._validators['scatter3d'] = v_data.Scatter3dsValidator()
self._validators['scattercarpet'] = v_data.ScattercarpetsValidator()
self._validators['scattergeo'] = v_data.ScattergeosValidator()
self._validators['scattergl'] = v_data.ScatterglsValidator()
self._validators['scattermapbox'] = v_data.ScattermapboxsValidator()
self._validators['scatterpolargl'] = v_data.ScatterpolarglsValidator()
self._validators['scatterpolar'] = v_data.ScatterpolarsValidator()
self._validators['scatter'] = v_data.ScattersValidator()
self._validators['scatterternary'] = v_data.ScatterternarysValidator()
self._validators['splom'] = v_data.SplomsValidator()
self._validators['streamtube'] = v_data.StreamtubesValidator()
self._validators['sunburst'] = v_data.SunburstsValidator()
self._validators['surface'] = v_data.SurfacesValidator()
self._validators['table'] = v_data.TablesValidator()
self._validators['violin'] = v_data.ViolinsValidator()
self._validators['volume'] = v_data.VolumesValidator()
self._validators['waterfall'] = v_data.WaterfallsValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('area', None)
self['area'] = area if area is not None else _v
_v = arg.pop('barpolar', None)
self['barpolar'] = barpolar if barpolar is not None else _v
_v = arg.pop('bar', None)
self['bar'] = bar if bar is not None else _v
_v = arg.pop('box', None)
self['box'] = box if box is not None else _v
_v = arg.pop('candlestick', None)
self['candlestick'] = candlestick if candlestick is not None else _v
_v = arg.pop('carpet', None)
self['carpet'] = carpet if carpet is not None else _v
_v = arg.pop('choropleth', None)
self['choropleth'] = choropleth if choropleth is not None else _v
_v = arg.pop('cone', None)
self['cone'] = cone if cone is not None else _v
_v = arg.pop('contourcarpet', None)
self['contourcarpet'
] = contourcarpet if contourcarpet is not None else _v
_v = arg.pop('contour', None)
self['contour'] = contour if contour is not None else _v
_v = arg.pop('heatmapgl', None)
self['heatmapgl'] = heatmapgl if heatmapgl is not None else _v
_v = arg.pop('heatmap', None)
self['heatmap'] = heatmap if heatmap is not None else _v
_v = arg.pop('histogram2dcontour', None)
self['histogram2dcontour'
] = histogram2dcontour if histogram2dcontour is not None else _v
_v = arg.pop('histogram2d', None)
self['histogram2d'] = histogram2d if histogram2d is not None else _v
_v = arg.pop('histogram', None)
self['histogram'] = histogram if histogram is not None else _v
_v = arg.pop('isosurface', None)
self['isosurface'] = isosurface if isosurface is not None else _v
_v = arg.pop('mesh3d', None)
self['mesh3d'] = mesh3d if mesh3d is not None else _v
_v = arg.pop('ohlc', None)
self['ohlc'] = ohlc if ohlc is not None else _v
_v = arg.pop('parcats', None)
self['parcats'] = parcats if parcats is not None else _v
_v = arg.pop('parcoords', None)
self['parcoords'] = parcoords if parcoords is not None else _v
_v = arg.pop('pie', None)
self['pie'] = pie if pie is not None else _v
_v = arg.pop('pointcloud', None)
self['pointcloud'] = pointcloud if pointcloud is not None else _v
_v = arg.pop('sankey', None)
self['sankey'] = sankey if sankey is not None else _v
_v = arg.pop('scatter3d', None)
self['scatter3d'] = scatter3d if scatter3d is not None else _v
_v = arg.pop('scattercarpet', None)
self['scattercarpet'
] = scattercarpet if scattercarpet is not None else _v
_v = arg.pop('scattergeo', None)
self['scattergeo'] = scattergeo if scattergeo is not None else _v
_v = arg.pop('scattergl', None)
self['scattergl'] = scattergl if scattergl is not None else _v
_v = arg.pop('scattermapbox', None)
self['scattermapbox'
] = scattermapbox if scattermapbox is not None else _v
_v = arg.pop('scatterpolargl', None)
self['scatterpolargl'
] = scatterpolargl if scatterpolargl is not None else _v
_v = arg.pop('scatterpolar', None)
self['scatterpolar'] = scatterpolar if scatterpolar is not None else _v
_v = arg.pop('scatter', None)
self['scatter'] = scatter if scatter is not None else _v
_v = arg.pop('scatterternary', None)
self['scatterternary'
] = scatterternary if scatterternary is not None else _v
_v = arg.pop('splom', None)
self['splom'] = splom if splom is not None else _v
_v = arg.pop('streamtube', None)
self['streamtube'] = streamtube if streamtube is not None else _v
_v = arg.pop('sunburst', None)
self['sunburst'] = sunburst if sunburst is not None else _v
_v = arg.pop('surface', None)
self['surface'] = surface if surface is not None else _v
_v = arg.pop('table', None)
self['table'] = table if table is not None else _v
_v = arg.pop('violin', None)
self['violin'] = violin if violin is not None else _v
_v = arg.pop('volume', None)
self['volume'] = volume if volume is not None else _v
_v = arg.pop('waterfall', None)
self['waterfall'] = waterfall if waterfall is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.layout.template import data
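# Illustrative sketch (not part of the generated module; `my_template` and the
# trace settings are hypothetical): defaults like the properties declared above
# are normally populated through the public plotly API, roughly as follows.
#
#     import plotly.graph_objs as go
#     my_template = go.layout.Template()
#     my_template.data.scatter = [go.Scatter(mode="lines+markers")]
#     my_template.data.bar = [go.Bar(marker={"color": "steelblue"})]
#     fig = go.Figure(data=[go.Bar(y=[2, 1, 3])])
#     fig.update_layout(template=my_template)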
|
py | 1a385cbdef7248065aa7029db3231d945cfc84c7 | import io
import jsonpickle
import logging
import numpy as np
import os
from tqdm import tqdm
from typing import Tuple, List, Optional, Dict, Text, Any
import rasa.utils.io
from rasa.core import utils
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import PREV_PREFIX, Domain
from rasa.core.events import ActionExecuted
from rasa.core.trackers import DialogueStateTracker
from rasa.core.training.data import DialogueTrainingData
from rasa.utils.common import is_logging_disabled
logger = logging.getLogger(__name__)
class SingleStateFeaturizer(object):
"""Base class for mechanisms to transform the conversations state into ML formats.
Subclasses of SingleStateFeaturizer decide how the bot will transform
the conversation state to a format which a classifier can read:
feature vector.
"""
def prepare_from_domain(self, domain: Domain) -> None:
"""Helper method to init based on domain."""
pass
def encode(self, state: Dict[Text, float]) -> np.ndarray:
"""Encode user input."""
raise NotImplementedError(
"SingleStateFeaturizer must have "
"the capacity to "
"encode states to a feature vector"
)
@staticmethod
def action_as_one_hot(action: Text, domain: Domain) -> np.ndarray:
"""Encode system action as one-hot vector."""
if action is None:
return np.ones(domain.num_actions, dtype=int) * -1
y = np.zeros(domain.num_actions, dtype=int)
y[domain.index_for_action(action)] = 1
return y
def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
"""Create matrix with all actions from domain encoded in rows."""
pass
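# Worked sketch of `action_as_one_hot` (toy domain values assumed, not taken
# from the original module): with `domain.num_actions == 4` and
# `domain.index_for_action("utter_greet") == 2`,
#   action_as_one_hot("utter_greet", domain) -> array([0, 0, 1, 0])
#   action_as_one_hot(None, domain)          -> array([-1, -1, -1, -1])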
class BinarySingleStateFeaturizer(SingleStateFeaturizer):
"""Assumes all features are binary.
All features should be either on or off, denoting them with 1 or 0.
"""
def __init__(self):
"""Declares instant variables."""
super(BinarySingleStateFeaturizer, self).__init__()
self.num_features = None
self.input_state_map = None
def prepare_from_domain(self, domain: Domain) -> None:
"""Use Domain to prepare featurizer."""
self.num_features = domain.num_states
self.input_state_map = domain.input_state_map
def encode(self, state: Dict[Text, float]) -> np.ndarray:
"""Returns a binary vector indicating which features are active.
Given a dictionary of states (e.g. 'intent_greet',
'prev_action_listen',...) return a binary vector indicating which
features of `self.input_features` are in the bag. NB it's a
regular double precision float array type.
For example with two active features out of five possible features
this would return a vector like `[0 0 1 0 1]`
If intent features are given with a probability, for example
with two active features and two uncertain intents out
of five possible features this would return a vector
like `[0.3, 0.7, 1.0, 0, 1.0]`.
If this is just a padding vector we set all values to `-1`.
padding vectors are specified by a `None` or `[None]`
value for states.
"""
if not self.num_features:
raise Exception(
"BinarySingleStateFeaturizer was not prepared before encoding."
)
if state is None or None in state:
return np.ones(self.num_features, dtype=np.int32) * -1
# we are going to use floats and convert to int later if possible
used_features = np.zeros(self.num_features, dtype=np.float)
using_only_ints = True
for state_name, prob in state.items():
if state_name in self.input_state_map:
idx = self.input_state_map[state_name]
used_features[idx] = prob
using_only_ints = using_only_ints and utils.is_int(prob)
else:
logger.debug(
"Feature '{}' (value: '{}') could not be found in "
"feature map. Make sure you added all intents and "
"entities to the domain".format(state_name, prob)
)
if using_only_ints:
# this is an optimization - saves us a bit of memory
return used_features.astype(np.int32)
else:
return used_features
def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
"""Create matrix with all actions from domain encoded in rows as bag of words"""
return np.eye(domain.num_actions)
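# Worked sketch of the binary encoding (toy domain values assumed): with
# `domain.num_states == 5` and
# `domain.input_state_map == {"intent_greet": 0, "prev_action_listen": 2}`,
# a prepared BinarySingleStateFeaturizer behaves as
#   encode({"intent_greet": 1, "prev_action_listen": 1}) -> array([1, 0, 1, 0, 0])
#   encode(None)                                         -> array([-1, -1, -1, -1, -1])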
class LabelTokenizerSingleStateFeaturizer(SingleStateFeaturizer):
"""Creates bag-of-words feature vectors.
User intents and bot action names are split into tokens
and used to create bag-of-words feature vectors.
Args:
split_symbol: The symbol that separates words in
intents and action names.
use_shared_vocab: The flag that specifies if to create
the same vocabulary for user intents and bot actions.
"""
def __init__(
self, use_shared_vocab: bool = False, split_symbol: Text = "_"
) -> None:
"""inits vocabulary for label bag of words representation"""
super(LabelTokenizerSingleStateFeaturizer, self).__init__()
self.use_shared_vocab = use_shared_vocab
self.split_symbol = split_symbol
self.num_features = None
self.user_labels = []
self.slot_labels = []
self.bot_labels = []
self.bot_vocab = None
self.user_vocab = None
@staticmethod
def _create_label_token_dict(labels, split_symbol="_"):
"""Splits labels into tokens by using provided symbol.
Creates the lookup dictionary for these tokens.
Values in this dict are used for featurization.
"""
distinct_tokens = set(
[token for label in labels for token in label.split(split_symbol)]
)
return {token: idx for idx, token in enumerate(sorted(distinct_tokens))}
def prepare_from_domain(self, domain: Domain) -> None:
"""Creates internal vocabularies for user intents and bot actions."""
self.user_labels = domain.intent_states + domain.entity_states
self.slot_labels = domain.slot_states + domain.form_states
self.bot_labels = domain.action_names
if self.use_shared_vocab:
self.bot_vocab = self._create_label_token_dict(
self.bot_labels + self.user_labels, self.split_symbol
)
self.user_vocab = self.bot_vocab
else:
self.bot_vocab = self._create_label_token_dict(
self.bot_labels, self.split_symbol
)
self.user_vocab = self._create_label_token_dict(
self.user_labels, self.split_symbol
)
self.num_features = (
len(self.user_vocab) + len(self.slot_labels) + len(self.bot_vocab)
)
def encode(self, state: Dict[Text, float]) -> np.ndarray:
"""Returns a binary vector indicating which tokens are present."""
if not self.num_features:
raise Exception(
"LabelTokenizerSingleStateFeaturizer "
"was not prepared before encoding."
)
if state is None or None in state:
return np.ones(self.num_features, dtype=np.int32) * -1
# we are going to use floats and convert to int later if possible
used_features = np.zeros(self.num_features, dtype=np.float)
using_only_ints = True
for state_name, prob in state.items():
using_only_ints = using_only_ints and utils.is_int(prob)
if state_name in self.user_labels:
if PREV_PREFIX + ACTION_LISTEN_NAME in state:
# else we predict next action from bot action and memory
for t in state_name.split(self.split_symbol):
used_features[self.user_vocab[t]] += prob
elif state_name in self.slot_labels:
offset = len(self.user_vocab)
idx = self.slot_labels.index(state_name)
used_features[offset + idx] += prob
elif state_name[len(PREV_PREFIX) :] in self.bot_labels:
action_name = state_name[len(PREV_PREFIX) :]
for t in action_name.split(self.split_symbol):
offset = len(self.user_vocab) + len(self.slot_labels)
idx = self.bot_vocab[t]
used_features[offset + idx] += prob
else:
logger.warning(
"Feature '{}' could not be found in "
"feature map.".format(state_name)
)
if using_only_ints:
# this is an optimization - saves us a bit of memory
return used_features.astype(np.int32)
else:
return used_features
def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
"""Create matrix with all actions from domain encoded in rows as bag of words"""
encoded_all_actions = np.zeros(
(domain.num_actions, len(self.bot_vocab)), dtype=np.int32
)
for idx, name in enumerate(domain.action_names):
for t in name.split(self.split_symbol):
encoded_all_actions[idx, self.bot_vocab[t]] = 1
return encoded_all_actions
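# Worked sketch of the bag-of-words action encoding (toy domain assumed, with
# the default "_" split symbol and a vocabulary built only from these two
# action names): for `domain.action_names == ["action_listen", "utter_greet"]`
# the sorted bot vocabulary is {"action": 0, "greet": 1, "listen": 2, "utter": 3},
# so create_encoded_all_actions(domain) returns
#   [[1, 0, 1, 0],   # "action_listen" -> tokens {"action", "listen"}
#    [0, 1, 0, 1]]   # "utter_greet"   -> tokens {"greet", "utter"}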
class TrackerFeaturizer(object):
"""Base class for actual tracker featurizers."""
def __init__(
self,
state_featurizer: Optional[SingleStateFeaturizer] = None,
use_intent_probabilities: bool = False,
) -> None:
self.state_featurizer = state_featurizer
self.use_intent_probabilities = use_intent_probabilities
def _create_states(
self,
tracker: DialogueStateTracker,
domain: Domain,
is_binary_training: bool = False,
) -> List[Dict[Text, float]]:
"""Create states: a list of dictionaries.
If use_intent_probabilities is False (default behaviour),
pick the most probable intent out of all provided ones and
set its probability to 1.0, while all the others to 0.0.
"""
states = tracker.past_states(domain)
# during training we encounter only 1 or 0
if not self.use_intent_probabilities and not is_binary_training:
bin_states = []
for state in states:
# copy state dict to preserve internal order of keys
bin_state = dict(state)
best_intent = None
best_intent_prob = -1.0
for state_name, prob in state:
if state_name.startswith("intent_"):
if prob > best_intent_prob:
# finding the maximum confidence intent
if best_intent is not None:
# delete previous best intent
del bin_state[best_intent]
best_intent = state_name
best_intent_prob = prob
else:
# delete other intents
del bin_state[state_name]
if best_intent is not None:
# set the confidence of best intent to 1.0
bin_state[best_intent] = 1.0
bin_states.append(bin_state)
return bin_states
else:
return [dict(state) for state in states]
def _pad_states(self, states: List[Any]) -> List[Any]:
"""Pads states."""
return states
def _featurize_states(
self, trackers_as_states: List[List[Dict[Text, float]]]
) -> Tuple[np.ndarray, List[int]]:
"""Create X."""
features = []
true_lengths = []
for tracker_states in trackers_as_states:
dialogue_len = len(tracker_states)
# len(trackers_as_states) = 1 means
# it is called during prediction or we have
# only one story, so no padding is needed
if len(trackers_as_states) > 1:
tracker_states = self._pad_states(tracker_states)
story_features = [
self.state_featurizer.encode(state) for state in tracker_states
]
features.append(story_features)
true_lengths.append(dialogue_len)
# noinspection PyPep8Naming
X = np.array(features)
return X, true_lengths
def _featurize_labels(
self, trackers_as_actions: List[List[Text]], domain: Domain
) -> np.ndarray:
"""Create y."""
labels = []
for tracker_actions in trackers_as_actions:
if len(trackers_as_actions) > 1:
tracker_actions = self._pad_states(tracker_actions)
story_labels = [
self.state_featurizer.action_as_one_hot(action, domain)
for action in tracker_actions
]
labels.append(story_labels)
y = np.array(labels)
if y.ndim == 3 and isinstance(self, MaxHistoryTrackerFeaturizer):
# if it is MaxHistoryFeaturizer, remove time axis
y = y[:, 0, :]
return y
def training_states_and_actions(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> Tuple[List[List[Dict]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions."""
raise NotImplementedError(
"Featurizer must have the capacity to encode trackers to feature vectors"
)
def featurize_trackers(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> DialogueTrainingData:
"""Create training data."""
if self.state_featurizer is None:
raise ValueError(
"Variable 'state_featurizer' is not set. Provide "
"'SingleStateFeaturizer' class to featurize trackers."
)
self.state_featurizer.prepare_from_domain(domain)
(trackers_as_states, trackers_as_actions) = self.training_states_and_actions(
trackers, domain
)
# noinspection PyPep8Naming
X, true_lengths = self._featurize_states(trackers_as_states)
y = self._featurize_labels(trackers_as_actions, domain)
return DialogueTrainingData(X, y, true_lengths)
def prediction_states(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction."""
raise NotImplementedError(
"Featurizer must have the capacity to create feature vector"
)
# noinspection PyPep8Naming
def create_X(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> np.ndarray:
"""Create X for prediction."""
trackers_as_states = self.prediction_states(trackers, domain)
X, _ = self._featurize_states(trackers_as_states)
return X
def persist(self, path):
featurizer_file = os.path.join(path, "featurizer.json")
rasa.utils.io.create_directory_for_file(featurizer_file)
# noinspection PyTypeChecker
rasa.utils.io.write_text_file(str(jsonpickle.encode(self)), featurizer_file)
@staticmethod
def load(path):
"""Loads the featurizer from file."""
featurizer_file = os.path.join(path, "featurizer.json")
if os.path.isfile(featurizer_file):
return jsonpickle.decode(rasa.utils.io.read_file(featurizer_file))
else:
logger.error(
"Couldn't load featurizer for policy. "
"File '{}' doesn't exist.".format(featurizer_file)
)
return None
class FullDialogueTrackerFeaturizer(TrackerFeaturizer):
"""Creates full dialogue training data for time distributed architectures.
Creates training data that uses each time output for prediction.
Training data is padded up to the length of the longest dialogue with -1.
"""
def __init__(
self,
state_featurizer: SingleStateFeaturizer,
use_intent_probabilities: bool = False,
) -> None:
super(FullDialogueTrackerFeaturizer, self).__init__(
state_featurizer, use_intent_probabilities
)
self.max_len = None
@staticmethod
def _calculate_max_len(trackers_as_actions):
"""Calculate the length of the longest dialogue."""
if trackers_as_actions:
return max([len(states) for states in trackers_as_actions])
else:
return None
def _pad_states(self, states: List[Any]) -> List[Any]:
"""Pads states up to max_len."""
if len(states) < self.max_len:
states += [None] * (self.max_len - len(states))
return states
def training_states_and_actions(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> Tuple[List[List[Dict]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions.
Training data is padded up to the length of the longest dialogue with -1.
"""
trackers_as_states = []
trackers_as_actions = []
logger.debug(
"Creating states and action examples from "
"collected trackers (by {}({}))..."
"".format(type(self).__name__, type(self.state_featurizer).__name__)
)
pbar = tqdm(trackers, desc="Processed trackers", disable=is_logging_disabled())
for tracker in pbar:
states = self._create_states(tracker, domain, is_binary_training=True)
delete_first_state = False
actions = []
for event in tracker.applied_events():
if isinstance(event, ActionExecuted):
if not event.unpredictable:
# only actions which can be
# predicted at a story's start
actions.append(event.action_name)
else:
# unpredictable actions can be
# only the first in the story
if delete_first_state:
raise Exception(
"Found two unpredictable "
"actions in one story."
"Check your story files."
)
else:
delete_first_state = True
if delete_first_state:
states = states[1:]
trackers_as_states.append(states[:-1])
trackers_as_actions.append(actions)
self.max_len = self._calculate_max_len(trackers_as_actions)
logger.debug("The longest dialogue has {} actions.".format(self.max_len))
return trackers_as_states, trackers_as_actions
def prediction_states(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction."""
trackers_as_states = [
self._create_states(tracker, domain) for tracker in trackers
]
return trackers_as_states
class MaxHistoryTrackerFeaturizer(TrackerFeaturizer):
"""Slices the tracker history into max_history batches.
Creates training data that uses last output for prediction.
Training data is padded up to the max_history with -1.
"""
MAX_HISTORY_DEFAULT = 5
def __init__(
self,
state_featurizer: Optional[SingleStateFeaturizer] = None,
max_history: int = None,
remove_duplicates: bool = True,
use_intent_probabilities: bool = False,
) -> None:
super(MaxHistoryTrackerFeaturizer, self).__init__(
state_featurizer, use_intent_probabilities
)
self.max_history = max_history or self.MAX_HISTORY_DEFAULT
self.remove_duplicates = remove_duplicates
@staticmethod
def slice_state_history(
states: List[Dict[Text, float]], slice_length: int
) -> List[Optional[Dict[Text, float]]]:
"""Slices states from the trackers history.
If the slice is at the array borders, padding will be added to ensure
the slice length.
"""
slice_end = len(states)
slice_start = max(0, slice_end - slice_length)
padding = [None] * max(0, slice_length - slice_end)
# noinspection PyTypeChecker
state_features = padding + states[slice_start:]
return state_features
@staticmethod
def _hash_example(states, action):
"""Hash states for efficient deduplication."""
frozen_states = tuple(
(s if s is None else frozenset(s.items()) for s in states)
)
frozen_actions = (action,)
return hash((frozen_states, frozen_actions))
def training_states_and_actions(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> Tuple[List[List[Optional[Dict[Text, float]]]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions.
Training data is padded up to the max_history with -1.
"""
trackers_as_states = []
trackers_as_actions = []
# from multiple states that create equal featurizations
# we only need to keep one.
hashed_examples = set()
logger.debug(
"Creating states and action examples from "
"collected trackers (by {}({}))..."
"".format(type(self).__name__, type(self.state_featurizer).__name__)
)
pbar = tqdm(trackers, desc="Processed trackers", disable=is_logging_disabled())
for tracker in pbar:
states = self._create_states(tracker, domain, True)
idx = 0
for event in tracker.applied_events():
if isinstance(event, ActionExecuted):
if not event.unpredictable:
# only actions which can be
# predicted at a story's start
sliced_states = self.slice_state_history(
states[: idx + 1], self.max_history
)
if self.remove_duplicates:
hashed = self._hash_example(
sliced_states, event.action_name
)
# only continue with tracker_states that created a
# hashed_featurization we haven't observed
if hashed not in hashed_examples:
hashed_examples.add(hashed)
trackers_as_states.append(sliced_states)
trackers_as_actions.append([event.action_name])
else:
trackers_as_states.append(sliced_states)
trackers_as_actions.append([event.action_name])
pbar.set_postfix(
{"# actions": "{:d}".format(len(trackers_as_actions))}
)
idx += 1
logger.debug("Created {} action examples.".format(len(trackers_as_actions)))
return trackers_as_states, trackers_as_actions
def prediction_states(
self, trackers: List[DialogueStateTracker], domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction."""
trackers_as_states = [
self._create_states(tracker, domain) for tracker in trackers
]
trackers_as_states = [
self.slice_state_history(states, self.max_history)
for states in trackers_as_states
]
return trackers_as_states
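if __name__ == "__main__":
    # Minimal runnable sketch (not part of the original module; assumes rasa and
    # its dependencies are installed): the history-slicing helper needs neither a
    # Domain nor trackers, so it can be exercised directly.
    example_states = [
        {"intent_greet": 1.0},
        {"prev_action_listen": 1.0},
        {"intent_goodbye": 1.0},
    ]
    # With slice_length == 5 the three states are left-padded with two `None`s.
    print(MaxHistoryTrackerFeaturizer.slice_state_history(example_states, 5))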
|
py | 1a385d5a231cf1fe8c22fc9ca18bd4920b9d91e1 | import wx
DisplayCrosshairsEventType = wx.NewEventType()
DisplayMagnifierEventType = wx.NewEventType()
FitToPageEventType = wx.NewEventType()
SetNumarrayEventType = wx.NewEventType()
ScaleSizeEventType = wx.NewEventType()
ScaleValuesEventType = wx.NewEventType()
EVT_DISPLAY_CROSSHAIRS = wx.PyEventBinder(DisplayCrosshairsEventType)
EVT_DISPLAY_MAGNIFIER = wx.PyEventBinder(DisplayMagnifierEventType)
EVT_FIT_TO_PAGE = wx.PyEventBinder(FitToPageEventType)
EVT_SET_NUMARRAY = wx.PyEventBinder(SetNumarrayEventType)
EVT_SCALE_SIZE = wx.PyEventBinder(ScaleSizeEventType)
EVT_SCALE_VALUES = wx.PyEventBinder(ScaleValuesEventType)
class DisplayCrosshairsEvent(wx.PyCommandEvent):
def __init__(self, source, display):
wx.PyCommandEvent.__init__(self, DisplayCrosshairsEventType,
source.GetId())
self.SetEventObject(source)
self.display = display
class DisplayMagnifierEvent(wx.PyCommandEvent):
def __init__(self, source, display):
wx.PyCommandEvent.__init__(self, DisplayMagnifierEventType,
source.GetId())
self.SetEventObject(source)
self.display = display
class FitToPageEvent(wx.PyCommandEvent):
def __init__(self, source):
wx.PyCommandEvent.__init__(self, FitToPageEventType, source.GetId())
self.SetEventObject(source)
class SetNumarrayEvent(wx.PyCommandEvent):
def __init__(self, source, array):
wx.PyCommandEvent.__init__(self, SetNumarrayEventType, source.GetId())
self.SetEventObject(source)
self.array = array
def GetNumarray(self):
return self.array
class ScaleSizeEvent(wx.PyCommandEvent):
def __init__(self, source, scale):
wx.PyCommandEvent.__init__(self, ScaleSizeEventType, source.GetId())
self.SetEventObject(source)
self.scale = scale
def GetScale(self):
return self.scale
class ScaleValuesEvent(wx.PyCommandEvent):
def __init__(self, source, valuerange):
wx.PyCommandEvent.__init__(self, ScaleValuesEventType, source.GetId())
self.SetEventObject(source)
self.valuerange = valuerange
def GetValueRange(self):
return self.valuerange
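# Illustrative sketch (assumption, not part of the original module): a window
# would typically bind and post one of these custom events along these lines;
# `panel` and `on_scale` are hypothetical.
#
#     panel.Bind(EVT_SCALE_SIZE, on_scale)             # on_scale(evt) can read evt.GetScale()
#     wx.PostEvent(panel, ScaleSizeEvent(panel, 2.0))  # deliver a scale factor of 2.0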
|
py | 1a385e8d9122df3e1cb3b8e757b4aec5cf3618c0 | from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import textattack
from textattack.commands import TextAttackCommand
from textattack.commands.attack.attack_args import *
from textattack.commands.augment import AUGMENTATION_RECIPE_NAMES
def _cb(s):
return textattack.shared.utils.color_text(str(s), color="blue", method="ansi")
class ListThingsCommand(TextAttackCommand):
"""
The list module:
List default things in textattack.
"""
def _list(self, list_of_things):
""" Prints a list or dict of things. """
if isinstance(list_of_things, list):
list_of_things = sorted(list_of_things)
for thing in list_of_things:
print(_cb(thing))
elif isinstance(list_of_things, dict):
for thing in sorted(list_of_things.keys()):
thing_long_description = list_of_things[thing]
print(f"{_cb(thing)} ({thing_long_description})")
else:
raise TypeError(f"Cannot print list of type {type(list_of_things)}")
@staticmethod
def things():
list_dict = {}
list_dict["models"] = list(HUGGINGFACE_DATASET_BY_MODEL.keys()) + list(
TEXTATTACK_DATASET_BY_MODEL.keys()
)
list_dict["search-methods"] = SEARCH_METHOD_CLASS_NAMES
list_dict["transformations"] = {
**BLACK_BOX_TRANSFORMATION_CLASS_NAMES,
**WHITE_BOX_TRANSFORMATION_CLASS_NAMES,
}
list_dict["constraints"] = CONSTRAINT_CLASS_NAMES
list_dict["goal-functions"] = GOAL_FUNCTION_CLASS_NAMES
list_dict["attack-recipes"] = ATTACK_RECIPE_NAMES
list_dict["augmentation-recipes"] = AUGMENTATION_RECIPE_NAMES
return list_dict
def run(self, args):
try:
list_of_things = ListThingsCommand.things()[args.feature]
except KeyError:
raise ValueError(f"Unknown list key {args.feature}")
self._list(list_of_things)
@staticmethod
def register_subcommand(main_parser: ArgumentParser):
parser = main_parser.add_parser(
"list",
help="list features in TextAttack",
formatter_class=ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"feature", help=f"the feature to list", choices=ListThingsCommand.things()
)
parser.set_defaults(func=ListThingsCommand())
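# Example invocations (assuming the standard `textattack` console entry point
# that dispatches to these registered subcommands):
#
#     $ textattack list attack-recipes
#     $ textattack list augmentation-recipes
#     $ textattack list models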
|
py | 1a385eb5b8828991db6cc4d75457b735e2ce8010 | import pytest
from aspr.sim.state import State, StateStatistics
from aspr.sim.interference_model import BinaryInterference
@pytest.fixture()
def state():
return State().init(size=5, int_model=BinaryInterference(), colors=range(5))
def test_copy_01(state):
state.tick().activate(0)
copy_state = state.copy()
copy_state.tick().activate(1)
assert copy_state.count_active_nodes() != state.count_active_nodes()
assert copy_state.t != state.t
assert copy_state.actvn_cnt != state.actvn_cnt
def test_copy_02(state):
state.tick().activate(0)
copy_state = state.copy()
assert copy_state.count_active_nodes() == state.count_active_nodes()
assert copy_state.t == state.t
assert copy_state.actvn_cnt == state.actvn_cnt
def test_count_active_nodes_01(state):
state.tick().activate(0)
assert state.count_active_nodes() == 1
def test_count_active_nodes_02(state):
state.tick().activate(0)._tick(8).activate(1)._tick(8)
assert state.count_active_nodes() == 1
def test_count_active_nodes_03(state):
state.tick().activate(0).tick().activate(1)
assert state.count_active_nodes() == 2
def test_count_active_nodes_04(state):
state.tick().activate(0)._tick(12)
assert state.count_active_nodes() == 0
def test_all_nodes_expired_01():
state = State().init(size=2, int_model=BinaryInterference(), colors=range(5))
state.tick().\
activate(0).\
tick().\
activate(1).\
_tick(12)
assert state.all_nodes_expired() == True
def test_all_nodes_expired_02():
state = State().init(size=2, int_model=BinaryInterference(), colors=range(5))
state.tick().\
activate(0).\
tick().\
activate(1).\
_tick(5)
assert state.all_nodes_expired() == False
def test_active_per_color_01(state):
state.tick().\
activate(0).\
tick().\
activate(1).\
tick().\
activate(0).\
tick()
stats = StateStatistics(state)
assert stats.active_per_color() == {
'c0-n-active': 2,
'c1-n-active': 1,
'c2-n-active': 0,
        'c3-n-active': 0,
'c4-n-active': 0}
def test_active_per_color_02(state):
state.tick().\
activate(0).\
_tick(5).\
activate(3).\
tick().\
activate(0).\
_tick(5)
stats = StateStatistics(state)
assert stats.active_per_color() == {
'c0-n-active': 1,
'c1-n-active': 0,
'c2-n-active': 0,
'c3-n-active': 1,
'c4-n-active': 0}
def test_ttl_per_color_01(state):
state.tick().\
activate(0).\
tick().\
activate(1).\
tick().\
activate(0).\
tick()
stats = StateStatistics(state)
assert stats.ttl_per_color() == {
'c0-total-ttl': 16,
'c1-total-ttl': 8,
'c2-total-ttl': 0,
'c3-total-ttl': 0,
'c4-total-ttl': 0}
def test_ttl_per_color_02(state):
state.tick().\
activate(0).\
_tick(5).\
activate(3).\
tick().\
activate(0).\
_tick(5)
stats = StateStatistics(state)
assert stats.ttl_per_color() == {
'c0-total-ttl': 5,
'c1-total-ttl': 0,
'c2-total-ttl': 0,
'c3-total-ttl': 4,
'c4-total-ttl': 0}
|
py | 1a385eeef5292d279737aa10be9b98dd3977ce74 | #!python3
from lib import queries
from tkinter import *
from tkinter import ttk
from lib.gui import elements
from lib.gui import dialog
flag = 0
dbDict = {}
fDict = {}
class MainFrame(Toplevel):
def __init__(self, dbCon):
Toplevel.__init__(self)
self.geometry("800x600")
self.flag = flag
self.dbDict = dbDict
self.fDict = fDict
self.dbName = ''
self.dbCon = dbCon
# Initialize attribute options
self.ribbonVar = IntVar()
self.imgColVar = IntVar()
self.dropdownChoices = [ 'From-To', 'Timespan', 'Ισοζύγιο' ]
self.ribbonDropdownVar = StringVar()
self.ribbonType = 0
# Initialize the user interface
self.el = self.init_gui()
if flag == 0:
self.dbDict = queries.loadDatabases(self.dbCon)
# Populate treeview with form names
for k, v in self.dbDict.items():
self.tree.insert('', 'end', iid=k, values=(k, v))
def init_gui(self):
"""Builds GUI."""
return elements.Elements.create_widgets(self)
def update_form(self):
formId = self.tree.focus()
dl = dialog.MyDialog(self, 'Είστε σίγουροι ότι θέλετε να αλλάξετε τη φόρμα No.' + formId + ';').show()
if dl == True:
# INSERT PROGRESS BAR CALL HERE
queries.updateForm(self.dbCon, self.dbName, formId, self.check_ribbon(), self.ribbonType, self.check_imgCol())
def restore_form(self):
formId = self.tree.focus()
dl = dialog.MyDialog(self, 'Είστε σίγουροι ότι θέλετε να επαναφέρετε τη φόρμα No.' + formId + ';').show()
if dl == True:
            queries.updateForm(self.dbCon, self.dbName, formId, self.check_ribbon(), self.ribbonType, self.check_imgCol())
def new_form(self):
nextId = max(formIds) + 1
self.dbCon.executeScriptsFromFile("scripts\Insert_Form.sql")
xml = xmlhandler.FormXml()
for child in xml.get_xml():
print(child.tag, child.attrib)
def use_database(self):
isUsed = self.flag
if isUsed == 0:
selectedDatabaseId = self.tree.focus()
selectedDatabaseName = queries.getSelectedDatabaseName(self.dbCon, selectedDatabaseId)
self.flag = 1
for i in self.tree.get_children():
self.tree.delete(i)
self.fDict = queries.loadForms(self.dbCon, selectedDatabaseName)
# Populate treeview with form names
for k, v in self.fDict.items():
self.tree.insert('', 'end', iid=k, values=(k, v))
elements.Elements.changeText(self, 'Exit DB')
elements.Elements.showButtons(self)
self.dbName = selectedDatabaseName
elif isUsed == 1:
self.flag = 0
            self.dbDict = queries.loadDatabases(self.dbCon)
# Clear the tree
for i in self.tree.get_children():
self.tree.delete(i)
# Populate treeview with database names
for k, v in self.dbDict.items():
self.tree.insert('', 'end', iid=k, values=(k, v))
elements.Elements.changeText(self, 'Use DB')
elements.Elements.hideButtons(self)
def exit_manager(self):
self.quit()
# Checkbox methods for each attribute
def check_ribbon(self):
return self.ribbonVar.get()
def check_imgCol(self):
return self.imgColVar.get()
def choice_ribbon(self, value):
if value == 'From-To':
self.ribbonType = 0
elif value == 'Timespan':
self.ribbonType = 1
elif value == 'Ισοζύγιο':
self.ribbonType = 2
|
py | 1a385f1224b1e0a4248b8f37985411c7f2ada387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '古溪'
import os
import numpy as np
from dataset.data_util import pil_load_img
from dataset.dataload import TextDataset, TextInstance
from util.io import read_lines
import cv2
class Ctw1500Text(TextDataset):
def __init__(self, data_root, is_training=True, transform=None, ignore_list=None):
super().__init__(transform, is_training)
self.data_root = data_root
self.is_training = is_training
self.image_root = os.path.join(data_root, 'train' if is_training else 'test', "text_image")
self.annotation_root = os.path.join(data_root, 'train' if is_training else 'test', "text_label_circum")
self.image_list = os.listdir(self.image_root)
self.annotation_list = ['{}'.format(img_name.replace('.jpg', '')) for img_name in self.image_list]
@staticmethod
def parse_carve_txt(gt_path):
"""
.mat file parser
:param gt_path: (str), mat file path
:return: (list), TextInstance
"""
lines = read_lines(gt_path + ".txt")
polygons = []
for line in lines:
# line = strs.remove_all(line.strip('\ufeff'), '\xef\xbb\xbf')
gt = list(map(int, line.split(',')))
pts = np.stack([gt[4::2], gt[5::2]]).T.astype(np.int32)
pts[:, 0] = pts[:, 0] + gt[0]
pts[:, 1] = pts[:, 1] + gt[1]
polygons.append(TextInstance(pts, 'c', "**"))
return polygons
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self.image_root, image_id)
# Read image data
image = pil_load_img(image_path)
try:
h, w, c = image.shape
assert(c == 3)
except:
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.array(image)
# Read annotation
annotation_id = self.annotation_list[item]
annotation_path = os.path.join(self.annotation_root, annotation_id)
polygons = self.parse_carve_txt(annotation_path)
return self.get_training_data(image, polygons, image_id=image_id, image_path=image_path)
def __len__(self):
return len(self.image_list)
if __name__ == '__main__':
from util.augmentation import Augmentation
from util.misc import regularize_sin_cos
from nmslib import lanms
from util.pbox import bbox_transfor_inv, minConnectPath
from util import canvas as cav
import time
means = (0.485, 0.456, 0.406)
stds = (0.229, 0.224, 0.225)
transform = Augmentation(
size=640, mean=means, std=stds
)
trainset = Ctw1500Text(
data_root='../data/ctw1500',
is_training=True,
transform=transform
)
# img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta = trainset[944]
for idx in range(0, len(trainset)):
t0 = time.time()
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi = trainset[idx]
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi \
= map(lambda x: x.cpu().numpy(), (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi))
img = img.transpose(1, 2, 0)
img = ((img * stds + means) * 255).astype(np.uint8)
print(idx, img.shape)
top_map = radius_map[:, :, 0]
bot_map = radius_map[:, :, 1]
print(radius_map.shape)
sin_map, cos_map = regularize_sin_cos(sin_map, cos_map)
ret, labels = cv2.connectedComponents(tcl_mask[:, :, 0].astype(np.uint8), connectivity=8)
cv2.imshow("labels0", cav.heatmap(np.array(labels * 255 / np.max(labels), dtype=np.uint8)))
print(np.sum(tcl_mask[:, :, 1]))
t0 = time.time()
for bbox_idx in range(1, ret):
bbox_mask = labels == bbox_idx
text_map = tcl_mask[:, :, 0] * bbox_mask
boxes = bbox_transfor_inv(radius_map, sin_map, cos_map, text_map, wclip=(2, 8))
# nms
boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), 0.25)
boxes = boxes[:, :8].reshape((-1, 4, 2)).astype(np.int32)
if boxes.shape[0] > 1:
center = np.mean(boxes, axis=1).astype(np.int32).tolist()
paths, routes_path = minConnectPath(center)
boxes = boxes[routes_path]
top = np.mean(boxes[:, 0:2, :], axis=1).astype(np.int32).tolist()
bot = np.mean(boxes[:, 2:4, :], axis=1).astype(np.int32).tolist()
boundary_point = top + bot[::-1]
# for index in routes:
for ip, pp in enumerate(top):
if ip == 0:
color = (0, 255, 255)
elif ip == len(top) - 1:
color = (255, 255, 0)
else:
color = (0, 0, 255)
cv2.circle(img, (int(pp[0]), int(pp[1])), 2, color, -1)
for ip, pp in enumerate(bot):
if ip == 0:
color = (0, 255, 255)
elif ip == len(top) - 1:
color = (255, 255, 0)
else:
color = (0, 255, 0)
cv2.circle(img, (int(pp[0]), int(pp[1])), 2, color, -1)
cv2.drawContours(img, [np.array(boundary_point)], -1, (0, 255, 255), 1)
# print("nms time: {}".format(time.time() - t0))
# # cv2.imshow("", img)
# # cv2.waitKey(0)
# print(meta["image_id"])
cv2.imshow('imgs', img)
cv2.imshow("", cav.heatmap(np.array(labels * 255 / np.max(labels), dtype=np.uint8)))
cv2.imshow("tr_mask", cav.heatmap(np.array(tr_mask * 255 / np.max(tr_mask), dtype=np.uint8)))
cv2.imshow("tcl_mask",
cav.heatmap(np.array(tcl_mask[:, :, 1] * 255 / np.max(tcl_mask[:, :, 1]), dtype=np.uint8)))
# cv2.imshow("top_map", cav.heatmap(np.array(top_map * 255 / np.max(top_map), dtype=np.uint8)))
# cv2.imshow("bot_map", cav.heatmap(np.array(bot_map * 255 / np.max(bot_map), dtype=np.uint8)))
cv2.waitKey(0)
|
py | 1a386069a0113919a5b5910c73d41babef426d1d | from functools import partial
import torch.nn as nn
from detectron2.layers import (BatchNorm2d, NaiveSyncBatchNorm,
FrozenBatchNorm2d)
from detectron2.utils import env
def get_norm(norm, out_channels, **kwargs):
"""
Args:
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module.
kwargs: Additional parameters in normalization layers,
such as, eps, momentum
Returns:
nn.Module or None: the normalization layer
"""
if norm is None:
return None
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
# Fixed in https://github.com/pytorch/pytorch/pull/36382
"SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (
1, 5) else nn.SyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
# for debugging:
"nnSyncBN": nn.SyncBatchNorm,
"naiveSyncBN": NaiveSyncBatchNorm,
}[norm]
return norm(out_channels, **kwargs)
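# Illustrative usage (a sketch, not part of the upstream module): build a few
# normalization layers for a 64-channel feature map; "GN" maps to the 32-group
# GroupNorm lambda defined above.
#   bn = get_norm("BN", 64)
#   gn = get_norm("GN", 64)
#   frozen = get_norm("FrozenBN", 64)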
def get_activation(activation):
"""
Only support `ReLU` and `LeakyReLU` now.
Args:
activation (str or callable):
Returns:
nn.Module: the activation layer
"""
act = {
"ReLU": nn.ReLU,
"LeakyReLU": nn.LeakyReLU,
}[activation]
if activation == "LeakyReLU":
act = partial(act, negative_slope=0.1)
return act(inplace=True)
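# Illustrative usage (sketch): get_activation("ReLU") returns nn.ReLU(inplace=True),
# while get_activation("LeakyReLU") returns nn.LeakyReLU(negative_slope=0.1, inplace=True).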
|
py | 1a3861f0c7d520f76bcb39ad121d3ec1ee29ebcc | """
In this lesson, we'll cover two more advanced data types: lists and dictionaries.
Let's start with lists.
Lists are a great way to store a bunch of stuff in a collection. Here's an example:
"""
myList = ["foo", 5, 7, True, "hello", False, -1]
"""
This list contains a bunch of things. In other languages, making lists (often also
called 'arrays') with different datatypes (our list has integers, strings, and
booleans) is a bit harder. Python ends up taking care of stuff for you under
the hood, but in languages like Java or C you'll have to do it yourself.
For a list to be helpful, we need to be able to grab or manipulate the values
in it. To get the values out of the list, we can 'index' them. Indexing them is
basically just saying "give me the x'th item in this list". In computer science,
arrays start at the 0'th index. So if I want to grab the first thing in the list,
I really have to index the zero'th thing in the list.
"""
# print(myList) # Prints the whole list
# print(myList[0]) # Gets the element at index 0, which is the first element.
# print(myList[4]) # Gets the element at index 4, which is the fifth element.
# print(myList[-1]) # Gets the last element (the -1 kind of 'wraps around')
"""
Let's take a closer look at what we did here: myList[index_number]. We can actually
do a couple of more things with this. The formal notation for this is
myList[start:end:step], so you can actually specify exactly what parts of the
list you want. Here are some examples:
"""
# print(myList[0:4]) # Gets the elements 0, 1, 2, 3. Note: the 'end' is not inclusive
# print(myList[3:4]) # Gets only the element at index 3, because the end is not inclusive
# print(myList[:5]) # Gets the first five elements, the 0 for 'start' is assumed.
# print(myList[::2]) # Here we do not specify a start or end, but we say 'step' is 2,
# so we get every other element.
# print(myList[::-1]) # Prints the reverse of the list because 'step' is -1
"""
Now that we've covered some list indexing and slicing, let's see what else we can do
with lists.
"""
# myList.pop(0) # Removes the item of the list at the 0'th index.
# print(myList) # Now the list is missing its first item, "foo"!
# print(len(myList)) # Prints the length of the list
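"""
Two more list operations you'll use all the time are adding an item and checking
whether something is in the list:
"""
# myList.append("new item") # Adds "new item" to the end of the list
# print("hello" in myList) # True, because "hello" is one of the elements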
"""
Now that we have lists under our belt, let's move on to dictionaries. Dictionaries
are similar to lists where we can store a bunch of values, but dictionaries contain
direct mappings of values to other values. Here's an example:
"""
addresses = {
"John Smith": "123 Technology Drive",
"Jane Doe": "615 Main Street",
"George Washington": "923 Providence Circle"
}
"""
There are a couple of things to notice here. First, we use curly braces '{' instead of
the square brackets '[' for lists. Second, we have to have colons between the keys
and values. In our case, the names are called the keys and the addresses are the
values. Lastly, we need commas in between the key-value pairs. Once we have a
dictionary, we can access the values with their keys like this:
"""
# print(addresses["John Smith"])
# print(addresses["Jane Doe"])
"""
As you can see, you can use the same kind of format as lists, but passing in the key
instead of the list index. You can also set the values this way, like this:
"""
# addresses["John Smith"] = "322 1st Street" # Overwrites the value at key "John Doe"
# print(addresses["John Smith"])
# addresses["Adam Jones"] = "817 42nd Street South" # We can add new key-value pairs
# like this too
# print(addresses)
"""
Here's some more things you can do with dictionaries.
"""
# addresses.pop("John Smith") # Removes a key-value pair
# print(addresses)
addresses.update({
"Mark Howard": "123 Seymour Drive",
"Carol Smith": "512 Arden Way"
}) # Can add any number of key-value pairs into the dictionary, or 'combine' two
# print(addresses)
# print(addresses.keys()) # Gets a list of all of the keys in the dictionary
# print(addresses.values()) # Gets a list of all of the values in the dictionary
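"""
One more pattern worth knowing: you can loop over keys and values together with
the .items() method, which pairs naturally with keys() and values() above.
"""
# for name, address in addresses.items():
#     print(name, "lives at", address)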
"""
To hammer a lot of this information in, take a look at "My Files/Python/3 - Practice.py"
for some practice problems.
"""
|
py | 1a3862357038b17e888230569f340b080ea605e6 | """
This schema represents all known key/value pairs for the builder config file.
"""
from strictyaml import (
load,
Map,
MapPattern,
Str,
Int,
Float,
Seq,
YAMLError,
Optional,
Bool
)
stat_schema = Seq(
Map({
"name": Str(),
"tag": Str(),
"values": Seq(
Map({
"name": Str(),
"value": Int() | Float(),
Optional("nominalValue"): Int() | Float(),
Optional("linkedValue"): Int() | Float(),
Optional("rangeMinValue"): Int() | Float(),
Optional("rangeMaxValue"): Int() | Float(),
Optional("flags"): Int()
})
)
}),
)
stat_format4_schema = Seq(
Map({
"name": Str(),
Optional("flags"): Int(),
"location": MapPattern(Str(), Int() | Float()),
})
)
instance_schema = MapPattern(Str(), Seq(
Map({
Optional("familyName"): Str(),
Optional("styleName"): Str(),
"coordinates": MapPattern(Str(), Int() | Float()),
})
))
schema = Map(
{
"sources": Seq(Str()),
Optional("vttSources"): MapPattern(Str(), Str()),
Optional("logLevel"): Str(),
Optional("stylespaceFile"): Str(),
Optional("stat"): stat_schema | MapPattern(Str(), stat_schema),
Optional("statFormat4"): stat_format4_schema | MapPattern(Str(), stat_format4_schema),
Optional("familyName"): Str(),
Optional("includeSourceFixes"): Bool(),
Optional("stylespaceFile"): Str(),
Optional("instances"): instance_schema,
Optional("buildVariable"): Bool(),
Optional("buildStatic"): Bool(),
Optional("buildOTF"): Bool(),
Optional("buildTTF"): Bool(),
Optional("buildWebfont"): Bool(),
Optional("outputDir"): Str(),
Optional("vfDir"): Str(),
Optional("ttDir"): Str(),
Optional("otDir"): Str(),
Optional("woffDir"): Str(),
Optional("cleanUp"): Bool(),
Optional("autohintTTF"): Bool(),
Optional("axisOrder"): Seq(Str()),
Optional("flattenComponents"): Bool(),
Optional("decomposeTransformedComponents"): Bool(),
}
)
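# A minimal config sketch (illustrative): only "sources" is required by the schema
# above, so the smallest valid document is just a list of source files. The file
# name below is made up for the example; load() is imported at the top of this module.
#   config = load("sources:\n  - MyFamily.glyphs\n", schema)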
|
py | 1a386263bacc3ed57debdbac18f2a212814621a9 | from b_lambda_layer_common.source.python.b_lambda_layer_common.ssm.refreshable import Refreshable
def test_FUNC_refresh_on_error_WITH_error_EXPECT_value_refreshed():
"""
Test whether decorator works.
:return: No return.
"""
class DummyParameter(Refreshable):
def __init__(self):
super().__init__()
self.called_counter = 0
def update_value(self):
self.called_counter += 1
dummy_parameter = DummyParameter()
@dummy_parameter.refresh_on_error()
def dummy_function():
raise Exception()
try:
dummy_function()
except:
pass
assert dummy_parameter.called_counter == 1
|
py | 1a3863e42d94ed9dc0b39ef2e511d3982f9f04b9 | from functools import wraps
from inspect import iscoroutinefunction
import falcon
try:
import jsonschema
except ImportError: # pragma: nocover
pass
def validate(req_schema=None, resp_schema=None, is_async=False):
"""Decorator for validating ``req.media`` using JSON Schema.
This decorator provides standard JSON Schema validation via the
``jsonschema`` package available from PyPI. Semantic validation via
the *format* keyword is enabled for the default checkers implemented
by ``jsonschema.FormatChecker``.
Note:
        The ``jsonschema`` package must be installed separately in order to use
this decorator, as Falcon does not install it by default.
See `json-schema.org <http://json-schema.org/>`_ for more
information on defining a compatible dictionary.
Keyword Args:
req_schema (dict): A dictionary that follows the JSON
Schema specification. The request will be validated against this
schema.
resp_schema (dict): A dictionary that follows the JSON
Schema specification. The response will be validated against this
schema.
is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
the decorated responder is a coroutine function (i.e., that it
is defined with ``async def``) or that it returns an awaitable
coroutine object.
Normally, when the function source is declared using ``async def``,
the resulting function object is flagged to indicate it returns a
coroutine when invoked, and this can be automatically detected.
However, it is possible to use a regular function to return an
awaitable coroutine object, in which case a hint is required to let
the framework know what to expect. Also, a hint is always required
when using a cythonized coroutine function, since Cython does not
flag them in a way that can be detected in advance, even when the
function is declared using ``async def``.
Example:
.. code:: python
from falcon.media.validators import jsonschema
# -- snip --
@jsonschema.validate(my_post_schema)
def on_post(self, req, resp):
# -- snip --
"""
def decorator(func):
if iscoroutinefunction(func) or is_async:
return _validate_async(func, req_schema, resp_schema)
return _validate(func, req_schema, resp_schema)
return decorator
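# Illustrative ASGI usage (an assumption mirroring the WSGI example in the docstring
# above): coroutine responders declared with ``async def`` are detected automatically,
# so ``is_async`` is only needed for the special cases the docstring describes.
#
#     @jsonschema.validate(my_post_schema)
#     async def on_post(self, req, resp):
#         ...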
def _validate(func, req_schema=None, resp_schema=None):
@wraps(func)
def wrapper(self, req, resp, *args, **kwargs):
if req_schema is not None:
try:
jsonschema.validate(
req.media, req_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError as e:
raise falcon.HTTPBadRequest(
'Request data failed validation',
description=e.message
)
result = func(self, req, resp, *args, **kwargs)
if resp_schema is not None:
try:
jsonschema.validate(
resp.media, resp_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError:
raise falcon.HTTPInternalServerError(
'Response data failed validation'
# Do not return 'e.message' in the response to
# prevent info about possible internal response
# formatting bugs from leaking out to users.
)
return result
return wrapper
def _validate_async(func, req_schema=None, resp_schema=None):
@wraps(func)
async def wrapper(self, req, resp, *args, **kwargs):
if req_schema is not None:
m = await req.get_media()
try:
jsonschema.validate(
m, req_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError as e:
raise falcon.HTTPBadRequest(
'Request data failed validation',
description=e.message
)
result = await func(self, req, resp, *args, **kwargs)
if resp_schema is not None:
try:
jsonschema.validate(
resp.media, resp_schema,
format_checker=jsonschema.FormatChecker()
)
except jsonschema.ValidationError:
raise falcon.HTTPInternalServerError(
'Response data failed validation'
# Do not return 'e.message' in the response to
# prevent info about possible internal response
# formatting bugs from leaking out to users.
)
return result
return wrapper
|
py | 1a38642ef6f05c94b68893e37899a170e64bd111 | import collections
import copy
import functools
import logging
import sys
import six
from jsonschema import Draft4Validator, ValidationError, draft4_format_checker
from werkzeug import FileStorage
from ..exceptions import ExtraParameterProblem
from ..http_facts import FORM_CONTENT_TYPES
from ..json_schema import Draft4RequestValidator, Draft4ResponseValidator
from ..problem import problem
from ..utils import all_json, boolean, is_json_mimetype, is_null, is_nullable
logger = logging.getLogger('connexion.decorators.validation')
TYPE_MAP = {
'integer': int,
'number': float,
'boolean': boolean
}
class TypeValidationError(Exception):
def __init__(self, schema_type, parameter_type, parameter_name):
"""
Exception raise when type validation fails
:type schema_type: str
:type parameter_type: str
:type parameter_name: str
:return:
"""
self.schema_type = schema_type
self.parameter_type = parameter_type
self.parameter_name = parameter_name
def __str__(self):
msg = "Wrong type, expected '{schema_type}' for {parameter_type} parameter '{parameter_name}'"
return msg.format(**vars(self))
def coerce_type(param, value, parameter_type, parameter_name=None):
def make_type(value, type_literal):
type_func = TYPE_MAP.get(type_literal)
return type_func(value)
param_schema = param.get("schema", param)
if is_nullable(param_schema) and is_null(value):
return None
param_type = param_schema.get('type')
parameter_name = parameter_name if parameter_name else param.get('name')
if param_type == "array":
converted_params = []
for v in value:
try:
converted = make_type(v, param_schema["items"]["type"])
except (ValueError, TypeError):
converted = v
converted_params.append(converted)
return converted_params
else:
try:
return make_type(value, param_type)
except ValueError:
raise TypeValidationError(param_type, parameter_type, parameter_name)
except TypeError:
return value
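# For example (illustrative): coerce_type({"name": "age", "type": "integer"}, "7", "query")
# returns the integer 7, while a non-numeric string for the same parameter raises
# TypeValidationError.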
def validate_parameter_list(request_params, spec_params):
request_params = set(request_params)
spec_params = set(spec_params)
return request_params.difference(spec_params)
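# e.g. (illustrative) validate_parameter_list(["limit", "offset"], ["limit"]) == {"offset"},
# i.e. the request parameters that are not declared in the spec.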
class RequestBodyValidator(object):
def __init__(self, schema, consumes, api, is_null_value_valid=False, validator=None,
strict_validation=False):
"""
:param schema: The schema of the request body
:param consumes: The list of content types the operation consumes
:param is_null_value_valid: Flag to indicate if null is accepted as valid value.
:param validator: Validator class that should be used to validate passed data
against API schema. Default is jsonschema.Draft4Validator.
:type validator: jsonschema.IValidator
:param strict_validation: Flag indicating if parameters not in spec are allowed
"""
self.consumes = consumes
self.schema = schema
self.has_default = schema.get('default', False)
self.is_null_value_valid = is_null_value_valid
validatorClass = validator or Draft4RequestValidator
self.validator = validatorClass(schema, format_checker=draft4_format_checker)
self.api = api
self.strict_validation = strict_validation
def validate_formdata_parameter_list(self, request):
request_params = request.form.keys()
spec_params = self.schema.get('properties', {}).keys()
return validate_parameter_list(request_params, spec_params)
def __call__(self, function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
@functools.wraps(function)
def wrapper(request):
if all_json(self.consumes):
data = request.json
empty_body = not(request.body or request.form or request.files)
if data is None and not empty_body and not self.is_null_value_valid:
try:
ctype_is_json = is_json_mimetype(request.headers.get("Content-Type", ""))
except ValueError:
ctype_is_json = False
if ctype_is_json:
# Content-Type is json but actual body was not parsed
return problem(400,
"Bad Request",
"Request body is not valid JSON"
)
else:
# the body has contents that were not parsed as JSON
return problem(415,
"Unsupported Media Type",
"Invalid Content-type ({content_type}), expected JSON data".format(
content_type=request.headers.get("Content-Type", "")
))
logger.debug("%s validating schema...", request.url)
error = self.validate_schema(data, request.url)
if error and not self.has_default:
return error
elif self.consumes[0] in FORM_CONTENT_TYPES:
data = dict(request.form.items()) or (request.body if len(request.body) > 0 else {})
data.update(dict.fromkeys(request.files, '')) # validator expects string..
logger.debug('%s validating schema...', request.url)
if self.strict_validation:
formdata_errors = self.validate_formdata_parameter_list(request)
if formdata_errors:
raise ExtraParameterProblem(formdata_errors, [])
if data:
props = self.schema.get("properties", {})
errs = []
for k, param_defn in props.items():
if k in data:
try:
data[k] = coerce_type(param_defn, data[k], 'requestBody', k)
except TypeValidationError as e:
errs += [str(e)]
                                logger.debug(errs)
if errs:
return problem(400, 'Bad Request', errs)
error = self.validate_schema(data, request.url)
if error:
return error
response = function(request)
return response
return wrapper
def validate_schema(self, data, url):
# type: (dict, AnyStr) -> Union[ConnexionResponse, None]
if self.is_null_value_valid and is_null(data):
return None
try:
self.validator.validate(data)
except ValidationError as exception:
logger.error("{url} validation error: {error}".format(url=url,
error=exception.message),
extra={'validator': 'body'})
return problem(400, 'Bad Request', str(exception.message))
return None
class ResponseBodyValidator(object):
def __init__(self, schema, validator=None):
"""
:param schema: The schema of the response body
:param validator: Validator class that should be used to validate passed data
against API schema. Default is jsonschema.Draft4Validator.
:type validator: jsonschema.IValidator
"""
ValidatorClass = validator or Draft4ResponseValidator
self.validator = ValidatorClass(schema, format_checker=draft4_format_checker)
def validate_schema(self, data, url):
# type: (dict, AnyStr) -> Union[ConnexionResponse, None]
try:
self.validator.validate(data)
except ValidationError as exception:
logger.error("{url} validation error: {error}".format(url=url,
error=exception),
extra={'validator': 'response'})
six.reraise(*sys.exc_info())
return None
class ParameterValidator(object):
def __init__(self, parameters, api, strict_validation=False):
"""
:param parameters: List of request parameter dictionaries
:param api: api that the validator is attached to
:param strict_validation: Flag indicating if parameters not in spec are allowed
"""
self.parameters = collections.defaultdict(list)
for p in parameters:
self.parameters[p['in']].append(p)
self.api = api
self.strict_validation = strict_validation
@staticmethod
def validate_parameter(parameter_type, value, param, param_name=None):
if value is not None:
if is_nullable(param) and is_null(value):
return
try:
converted_value = coerce_type(param, value, parameter_type, param_name)
except TypeValidationError as e:
return str(e)
param = copy.deepcopy(param)
if 'required' in param:
del param['required']
try:
if parameter_type == 'formdata' and param.get('type') == 'file':
Draft4Validator(
param,
format_checker=draft4_format_checker,
types={'file': FileStorage}).validate(converted_value)
else:
Draft4Validator(
param, format_checker=draft4_format_checker).validate(converted_value)
except ValidationError as exception:
                debug_msg = 'Error while converting value {converted_value} (actual type ' \
                    '{type_converted_value}) to the declared type {param_type}; parameter definition: {param}'
fmt_params = dict(
converted_value=str(converted_value),
type_converted_value=type(converted_value),
param_type=param.get('type'),
param=param
)
logger.info(debug_msg.format(**fmt_params))
return str(exception)
elif param.get('required'):
return "Missing {parameter_type} parameter '{param[name]}'".format(**locals())
def validate_query_parameter_list(self, request):
request_params = request.query.keys()
spec_params = [x['name'] for x in self.parameters.get('query', [])]
return validate_parameter_list(request_params, spec_params)
def validate_formdata_parameter_list(self, request):
request_params = request.form.keys()
spec_params = [x['name'] for x in self.parameters.get('formData', [])]
return validate_parameter_list(request_params, spec_params)
def validate_query_parameter(self, param, request):
"""
Validate a single query parameter (request.args in Flask)
:type param: dict
:rtype: str
"""
val = request.query.get(param['name'])
return self.validate_parameter('query', val, param)
def validate_path_parameter(self, param, request):
val = request.path_params.get(param['name'].replace('-', '_'))
return self.validate_parameter('path', val, param)
def validate_header_parameter(self, param, request):
val = request.headers.get(param['name'])
return self.validate_parameter('header', val, param)
def validate_formdata_parameter(self, param_name, param, request):
if param.get('type') == 'file' or param.get('format') == 'binary':
val = request.files.get(param_name)
else:
val = request.form.get(param_name)
return self.validate_parameter('formdata', val, param)
def __call__(self, function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
@functools.wraps(function)
def wrapper(request):
logger.debug("%s validating parameters...", request.url)
if self.strict_validation:
query_errors = self.validate_query_parameter_list(request)
formdata_errors = self.validate_formdata_parameter_list(request)
if formdata_errors or query_errors:
raise ExtraParameterProblem(formdata_errors, query_errors)
for param in self.parameters.get('query', []):
error = self.validate_query_parameter(param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
for param in self.parameters.get('path', []):
error = self.validate_path_parameter(param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
for param in self.parameters.get('header', []):
error = self.validate_header_parameter(param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
for param in self.parameters.get('formData', []):
error = self.validate_formdata_parameter(param["name"], param, request)
if error:
response = problem(400, 'Bad Request', error)
return self.api.get_response(response)
return function(request)
return wrapper
|
py | 1a38653651f0b18e1974dfd90e7b42e539647c48 | #
# PySNMP MIB module CISCO-DMN-DSG-CI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-DMN-DSG-CI-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:37:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
ciscoDSGUtilities, = mibBuilder.importSymbols("CISCO-DMN-DSG-ROOT-MIB", "ciscoDSGUtilities")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Integer32, MibIdentifier, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter32, NotificationType, ObjectIdentity, Gauge32, Unsigned32, IpAddress, ModuleIdentity, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibIdentifier", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter32", "NotificationType", "ObjectIdentity", "Gauge32", "Unsigned32", "IpAddress", "ModuleIdentity", "Bits")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
ciscoDSGCI = ModuleIdentity((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12))
ciscoDSGCI.setRevisions(('2012-03-20 08:00', '2010-10-13 08:00', '2010-08-30 09:00', '2010-04-12 09:00', '2010-03-22 05:00', '2010-02-12 12:00', '2009-12-07 12:00',))
if mibBuilder.loadTexts: ciscoDSGCI.setLastUpdated('201203200800Z')
if mibBuilder.loadTexts: ciscoDSGCI.setOrganization('Cisco Systems, Inc.')
ciTable = MibIdentifier((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2))
ciConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1), )
if mibBuilder.loadTexts: ciConfigTable.setStatus('current')
ciConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1), ).setIndexNames((0, "CISCO-DMN-DSG-CI-MIB", "ciConfigInstance"))
if mibBuilder.loadTexts: ciConfigEntry.setStatus('current')
ciConfigInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1)))
if mibBuilder.loadTexts: ciConfigInstance.setStatus('current')
ciConfigQuery = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigQuery.setStatus('current')
ciConfigAutoReset = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigAutoReset.setStatus('current')
ciConfigListMgmt = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("addDelete", 1), ("updateAll", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigListMgmt.setStatus('current')
ciConfigOrgNetIDUse = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigOrgNetIDUse.setStatus('current')
ciConfigTransportId = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigTransportId.setStatus('current')
ciConfigOrgNetID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigOrgNetID.setStatus('current')
ciConfigTsHandling = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("entire", 1), ("partial", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciConfigTsHandling.setStatus('current')
ciProgramDecrTable = MibTable((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 2), )
if mibBuilder.loadTexts: ciProgramDecrTable.setStatus('current')
ciProgramDecrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 2, 1), ).setIndexNames((0, "CISCO-DMN-DSG-CI-MIB", "ciProgramDecrPEID"))
if mibBuilder.loadTexts: ciProgramDecrEntry.setStatus('current')
ciProgramDecrPEID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32)))
if mibBuilder.loadTexts: ciProgramDecrPEID.setStatus('current')
ciProgramDecrDecrypt = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("on", 2), ("comp", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciProgramDecrDecrypt.setStatus('current')
ciProgramDecrCISlot = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("top", 1), ("bottom", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciProgramDecrCISlot.setStatus('current')
ciCompConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3), )
if mibBuilder.loadTexts: ciCompConfigTable.setStatus('current')
ciCompConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1), ).setIndexNames((0, "CISCO-DMN-DSG-CI-MIB", "ciCompConfigID"))
if mibBuilder.loadTexts: ciCompConfigEntry.setStatus('current')
ciCompConfigID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32)))
if mibBuilder.loadTexts: ciCompConfigID.setStatus('current')
ciCompConfigPEID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigPEID.setStatus('current')
ciCompConfigMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("pid", 1), ("stream", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigMode.setStatus('current')
ciCompConfigPID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8192))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigPID.setStatus('current')
ciCompConfigStreamCategory = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 8, 12))).clone(namedValues=NamedValues(("vid", 2), ("aud", 3), ("subt", 4), ("ttx", 8), ("user", 12)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigStreamCategory.setStatus('current')
ciCompConfigStreamTypeVal = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigStreamTypeVal.setStatus('current')
ciCompConfigStreamInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigStreamInstance.setStatus('current')
ciCompConfigRowCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 3, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciCompConfigRowCmdStatus.setStatus('current')
ciCompStatusTable = MibTable((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4), )
if mibBuilder.loadTexts: ciCompStatusTable.setStatus('current')
ciCompStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1), ).setIndexNames((0, "CISCO-DMN-DSG-CI-MIB", "ciStatusSlot"))
if mibBuilder.loadTexts: ciCompStatusEntry.setStatus('current')
ciStatusSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("top", 1), ("bottom", 2))))
if mibBuilder.loadTexts: ciStatusSlot.setStatus('current')
ciStatusSysName = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusSysName.setStatus('current')
ciStatusMFGID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusMFGID.setStatus('current')
ciStatusMFGCode = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusMFGCode.setStatus('current')
ciStatusSerialNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusSerialNum.setStatus('current')
ciStatusHWVer = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusHWVer.setStatus('current')
ciStatusAppVer = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusAppVer.setStatus('current')
ciStatusCompany = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 39))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusCompany.setStatus('current')
ciStatusProdname = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 39))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusProdname.setStatus('current')
ciStatusCamStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 4, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notReady", 1), ("ready", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciStatusCamStatus.setStatus('current')
ciSystemIDTable = MibTable((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5), )
if mibBuilder.loadTexts: ciSystemIDTable.setStatus('current')
ciSystemIDEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5, 1), ).setIndexNames((0, "CISCO-DMN-DSG-CI-MIB", "ciSystemIDSlot"), (0, "CISCO-DMN-DSG-CI-MIB", "ciSystemIDIndex"))
if mibBuilder.loadTexts: ciSystemIDEntry.setStatus('current')
ciSystemIDSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("top", 1), ("bottom", 2))))
if mibBuilder.loadTexts: ciSystemIDSlot.setStatus('current')
ciSystemIDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32)))
if mibBuilder.loadTexts: ciSystemIDIndex.setStatus('current')
ciSystemIDName = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciSystemIDName.setStatus('current')
ciSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciSystemID.setStatus('current')
ciSystemSysNameID = MibTableColumn((1, 3, 6, 1, 4, 1, 1429, 2, 2, 5, 12, 2, 5, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 37))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciSystemSysNameID.setStatus('current')
mibBuilder.exportSymbols("CISCO-DMN-DSG-CI-MIB", ciStatusAppVer=ciStatusAppVer, PYSNMP_MODULE_ID=ciscoDSGCI, ciConfigInstance=ciConfigInstance, ciCompConfigTable=ciCompConfigTable, ciCompConfigStreamInstance=ciCompConfigStreamInstance, ciStatusMFGCode=ciStatusMFGCode, ciConfigOrgNetID=ciConfigOrgNetID, ciProgramDecrDecrypt=ciProgramDecrDecrypt, ciCompConfigEntry=ciCompConfigEntry, ciSystemIDName=ciSystemIDName, ciSystemIDTable=ciSystemIDTable, ciCompConfigStreamTypeVal=ciCompConfigStreamTypeVal, ciCompConfigRowCmdStatus=ciCompConfigRowCmdStatus, ciConfigQuery=ciConfigQuery, ciStatusCompany=ciStatusCompany, ciCompConfigStreamCategory=ciCompConfigStreamCategory, ciStatusSlot=ciStatusSlot, ciStatusMFGID=ciStatusMFGID, ciStatusSerialNum=ciStatusSerialNum, ciSystemSysNameID=ciSystemSysNameID, ciscoDSGCI=ciscoDSGCI, ciConfigTransportId=ciConfigTransportId, ciProgramDecrEntry=ciProgramDecrEntry, ciStatusHWVer=ciStatusHWVer, ciConfigTable=ciConfigTable, ciStatusCamStatus=ciStatusCamStatus, ciTable=ciTable, ciStatusProdname=ciStatusProdname, ciSystemIDEntry=ciSystemIDEntry, ciConfigListMgmt=ciConfigListMgmt, ciCompConfigMode=ciCompConfigMode, ciConfigTsHandling=ciConfigTsHandling, ciProgramDecrTable=ciProgramDecrTable, ciConfigOrgNetIDUse=ciConfigOrgNetIDUse, ciSystemIDSlot=ciSystemIDSlot, ciCompConfigID=ciCompConfigID, ciProgramDecrPEID=ciProgramDecrPEID, ciStatusSysName=ciStatusSysName, ciConfigAutoReset=ciConfigAutoReset, ciCompStatusTable=ciCompStatusTable, ciSystemIDIndex=ciSystemIDIndex, ciCompStatusEntry=ciCompStatusEntry, ciSystemID=ciSystemID, ciProgramDecrCISlot=ciProgramDecrCISlot, ciConfigEntry=ciConfigEntry, ciCompConfigPID=ciCompConfigPID, ciCompConfigPEID=ciCompConfigPEID)
|
py | 1a386554fe69cba3476723c6ef4ff1fd918f9669 | import os
from decouple import config
from flask import Flask, render_template
from core.model import cotacoes
def create_app():
app = Flask('core')
app.config["SECRET_KEY"] = config('SECRET_KEY')
@app.route('/')
def home():
dicionario = cotacoes.cotar()
if dicionario['sucesso']:
template_renderised = render_template("index.html", dicionario=dicionario)
else:
template_renderised = render_template('error.html', dicionario=dicionario)
return template_renderised
return app
if __name__ == '__main__':
app = create_app()
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
py | 1a386556d97c49872668ae4e4943217dd45e40e9 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""adam"""
import numpy as np
from mindspore.common import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer
_learning_rate_update_func = ['linear', 'cos', 'sin']
adam_opt = C.MultitypeFuncGraph("adam_opt")
@adam_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Bool")
def _update_run_op(beta1, beta2, eps, lr, weight_decay_tensor, param, m, v, gradient, decay_flag):
"""
Update parameters.
Args:
beta1 (Tensor): The exponential decay rate for the 1st moment estimates. Should be in range (0.0, 1.0).
beta2 (Tensor): The exponential decay rate for the 2nd moment estimates. Should be in range (0.0, 1.0).
eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
lr (Tensor): Learning rate.
weight_decay_tensor (Tensor): Weight decay. Should be equal to or greater than 0.
param (Tensor): Parameters.
m (Tensor): m value of parameters.
v (Tensor): v value of parameters.
gradient (Tensor): Gradient of parameters.
Returns:
Tensor, the new value of v after updating.
"""
op_mul = P.Mul()
op_square = P.Square()
op_sqrt = P.Sqrt()
op_cast = P.Cast()
op_reshape = P.Reshape()
op_shape = P.Shape()
param_fp32 = op_cast(param, mstype.float32)
m_fp32 = op_cast(m, mstype.float32)
v_fp32 = op_cast(v, mstype.float32)
gradient_fp32 = op_cast(gradient, mstype.float32)
next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta1, gradient_fp32)
next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32)
- beta2, op_square(gradient_fp32))
update = next_m / (op_sqrt(next_v) + eps)
if decay_flag:
update = update + op_mul(weight_decay_tensor, param_fp32)
update_with_lr = op_mul(lr, update)
next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))
next_v = F.depend(next_v, F.assign(param, next_param))
next_v = F.depend(next_v, F.assign(m, next_m))
next_v = F.depend(next_v, F.assign(v, next_v))
return next_v
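# A NumPy reference sketch (illustrative only, not part of the MindSpore graph above):
# one decoupled-weight-decay update step equivalent to _update_run_op with decay_flag=True.
#
#   import numpy as np
#   def adamw_step(param, m, v, grad, lr, weight_decay, beta1=0.9, beta2=0.999, eps=1e-6):
#       m = beta1 * m + (1 - beta1) * grad
#       v = beta2 * v + (1 - beta2) * grad * grad
#       update = m / (np.sqrt(v) + eps) + weight_decay * param
#       return param - lr * update, m, v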
def _check_param_value(beta1, beta2, eps, weight_decay, prim_name):
"""Check the type of inputs."""
validator.check_value_type("beta1", beta1, [float], prim_name)
validator.check_value_type("beta2", beta2, [float], prim_name)
validator.check_value_type("eps", eps, [float], prim_name)
validator.check_value_type("weight_dacay", weight_decay, [float], prim_name)
validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
validator.check_number_range("eps", eps, 0.0, float("inf"), Rel.INC_NEITHER, prim_name)
validator.check_number_range("weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
def _check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, prim_name):
"""Check the type of inputs."""
validator.check_float_positive('learning_rate', learning_rate, prim_name)
validator.check_float_legal_value('learning_rate', learning_rate, prim_name)
validator.check_float_positive('end_learning_rate', end_learning_rate, prim_name)
validator.check_float_legal_value('end_learning_rate', end_learning_rate, prim_name)
validator.check_float_positive('power', power, prim_name)
validator.check_float_legal_value('power', power, prim_name)
validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
@adam_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number", "Tensor", "Tensor", "Tensor",
"Tensor")
def _run_opt_with_one_number(opt, lr, beta1_power, beta2_power, beta1, beta2, eps, gradient, params, moment1,
moment2):
"""Apply adam optimizer to the weight parameter using Tensor."""
success = True
success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
eps, gradient))
return success
class Adam(Optimizer):
r"""
Updates gradients by Adaptive Moment Estimation (Adam) algorithm.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector `moment1`, :math:`v` represents the 2nd moment vector `moment2`,
:math:`g` represents `gradients`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent
`beta1` and `beta2`, :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent
`beta1_power` and `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `params`,
:math:`\epsilon` represents `eps`.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be class mindspore.Parameter.
learning_rate (Union[float, Tensor, Iterable]): A value for the learning rate. When the learning_rate is
Iterable or a Tensor and the dims of the Tensor is 1,
use dynamic learning rate, then the i-th step will
take the i-th value as the learning rate.
When the learning_rate is float or learning_rate is a Tensor
but the dims of the Tensor is 0, use fixed learning rate.
Other cases are not supported. Default: 1e-3.
beta1 (float): The exponential decay rate for the 1st moment estimates. Should be in range (0.0, 1.0). Default:
0.9.
beta2 (float): The exponential decay rate for the 2nd moment estimates. Should be in range (0.0, 1.0). Default:
0.999.
eps (float): Term added to the denominator to improve numerical stability. Should be greater than 0. Default:
1e-8.
use_locking (bool): Whether to enable a lock to protect updating variable tensors.
If True, updating of the var, m, and v tensors will be protected by a lock.
If False, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If True, updates the gradients using NAG.
If False, updates the gradients without using NAG. Default: False.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
loss_scale (float): A floating point value for the loss scale. Should be equal to or greater than 1. Default:
1.0.
decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default:
lambda x: 'LayerNorm' not in x.name and 'bias' not in x.name.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
Tensor[bool], the value is True.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.Adam(params=net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
use_nesterov=False, weight_decay=0.0, loss_scale=1.0,
decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
super(Adam, self).__init__(learning_rate, params, weight_decay, loss_scale, decay_filter)
_check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
validator.check_value_type("use_locking", use_locking, [bool], self.cls_name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.cls_name)
validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
validator.check_number_range("loss_scale", loss_scale, 1.0, float("inf"), Rel.INC_LEFT, self.cls_name)
self.beta1 = Tensor(beta1, mstype.float32)
self.beta2 = Tensor(beta2, mstype.float32)
self.beta1_power = Parameter(initializer(1, [1], mstype.float32), name="beta1_power")
self.beta2_power = Parameter(initializer(1, [1], mstype.float32), name="beta2_power")
self.eps = eps
self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')
self.hyper_map = C.HyperMap()
self.opt = P.Adam(use_locking, use_nesterov)
self.pow = P.Pow()
self.sqrt = P.Sqrt()
self.one = Tensor(np.array([1.0]).astype(np.float32))
self.realdiv = P.RealDiv()
def construct(self, gradients):
params = self.parameters
moment1 = self.moment1
moment2 = self.moment2
gradients = self.decay_weight(gradients)
gradients = self.scale_grad(gradients)
lr = self.get_lr()
beta1_power = self.beta1_power * self.beta1
self.beta1_power = beta1_power
beta2_power = self.beta2_power * self.beta2
self.beta2_power = beta2_power
success = self.hyper_map(F.partial(adam_opt, self.opt, lr, beta1_power, beta2_power, self.beta1,
self.beta2, self.eps),
gradients, params, moment1, moment2)
return success
class AdamWeightDecay(Optimizer):
"""
Implements Adam algorithm weight decay fix.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be class mindspore.Parameter.
learning_rate (Union[float, Tensor, Iterable]): A value for the learning rate. When the learning_rate is
Iterable or a Tensor and the dims of the Tensor is 1,
use dynamic learning rate, then the i-th step will
take the i-th value as the learning rate.
When the learning_rate is float or learning_rate is a Tensor
but the dims of the Tensor is 0, use fixed learning rate.
Other cases are not supported. Default: 1e-3.
beta1 (float): The exponential decay rate for the 1st moment estimates. Default: 0.9.
Should be in range (0.0, 1.0).
beta2 (float): The exponential decay rate for the 2nd moment estimates. Default: 0.999.
Should be in range (0.0, 1.0).
eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.
Should be greater than 0.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default:
                                 lambda x: 'beta' not in x.name and 'gamma' not in x.name.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
tuple[Parameter], the updated velocity value, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.AdamWeightDecay(params=net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0,
decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
super(AdamWeightDecay, self).__init__(learning_rate, params)
_check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
self.eps = Tensor(np.array([eps]).astype(np.float32))
self.weight_decay_tensor = Tensor(np.array([weight_decay]).astype(np.float32))
self.params = self.parameters
self.moments1 = self.params.clone(prefix="adam_m", init='zeros')
self.moments2 = self.params.clone(prefix="adam_v", init='zeros')
self.decay_flag = tuple(decay_filter(x) for x in self.params)
self.hyper_map = C.HyperMap()
def construct(self, gradients):
lr = self.get_lr()
updated_velocity = self.hyper_map(F.partial(adam_opt, self.beta1, self.beta2, self.eps, lr,
self.weight_decay_tensor),
self.params, self.moments1, self.moments2, gradients, self.decay_flag)
return updated_velocity
class AdamWeightDecayDynamicLR(Optimizer):
"""
Adam Weight Decay Dynamic Learning Rate (LR).
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be class mindspore.Parameter.
decay_steps (int): The steps of the decay.
learning_rate (float): A floating point value for the learning rate. Default: 0.001.
end_learning_rate (float): A floating point value for the end learning rate. Default: 0.0001.
power (float): Power. Default: 10.0.
beta1 (float): The exponential decay rate for the 1st moment estimates. Default: 0.9.
Should be in range (0.0, 1.0).
beta2 (float): The exponential decay rate for the 2nd moment estimates. Default: 0.999.
Should be in range (0.0, 1.0).
eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.
Should be greater than 0.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default:
                                 lambda x: 'beta' not in x.name and 'gamma' not in x.name.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
tuple[Parameter], the updated velocity value, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.AdamWeightDecayDynamicLR(params=net.trainable_params(), decay_steps=10)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self,
params,
decay_steps,
learning_rate=0.001,
end_learning_rate=0.0001,
power=10.0,
beta1=0.9,
beta2=0.999,
eps=1e-6,
weight_decay=0.0,
decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
super(AdamWeightDecayDynamicLR, self).__init__(learning_rate, params)
_check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
_check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, self.cls_name)
# turn them to scalar when me support scalar/tensor mix operations
self.global_step = Parameter(initializer(0, [1]), name="global_step")
self.decay_steps = Tensor(np.array([decay_steps]).astype(np.float32))
self.end_learning_rate = Tensor(np.array([end_learning_rate]).astype(np.float32))
self.diff_learning_rate = Tensor(np.array([learning_rate - end_learning_rate]).astype(np.float32))
self.power = power
self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
self.eps = Tensor(np.array([eps]).astype(np.float32))
self.weight_decay_tensor = Tensor(np.array([weight_decay]).astype(np.float32))
self.params = self.parameters
self.moments1 = self.params.clone(prefix="adam_m", init='zeros')
self.moments2 = self.params.clone(prefix="adam_v", init='zeros')
self.decay_flag = tuple(decay_filter(x) for x in self.params)
self.hyper_map = C.HyperMap()
self.min = P.Minimum()
self.pow = P.Pow()
self.one = Tensor(np.array([1.0]).astype(np.float32))
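    # Sketch of the schedule computed in construct() below (added comment, not part of the original source):
    #   p  = min(global_step, decay_steps) / decay_steps
    #   lr = (learning_rate - end_learning_rate) * (1 - p) ** power + end_learning_rate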
def construct(self, gradients):
step = self.min(self.global_step, self.decay_steps)
p = step / self.decay_steps
lr = self.diff_learning_rate * self.pow(self.one - p, self.power) + self.end_learning_rate
updated_velocity = self.hyper_map(F.partial(adam_opt, self.beta1, self.beta2, self.eps, lr,
self.weight_decay_tensor),
self.params, self.moments1, self.moments2, gradients, self.decay_flag)
added_global_step = self.global_step + self.one
F.control_depend(lr, added_global_step)
self.global_step = added_global_step
return updated_velocity
|
py | 1a386654076e929611c6f1ce81e232d4830d72cd | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
try:
from pandas._testing import makeMissingDataframe
except ImportError:
from pandas.util.testing import makeMissingDataframe
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
class StatsTest(PandasOnSparkTestCase, SQLTestUtils):
def _test_stat_functions(self, pdf_or_pser, psdf_or_psser):
functions = ["max", "min", "mean", "sum", "count"]
for funcname in functions:
self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)())
functions = ["std", "var", "product", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(),
getattr(pdf_or_pser, funcname)(),
check_exact=False,
)
functions = ["std", "var", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(ddof=0),
getattr(pdf_or_pser, funcname)(ddof=0),
check_exact=False,
)
# NOTE: To test skew, kurt, and median, just make sure they run.
# The numbers are different in spark and pandas.
functions = ["skew", "kurt", "median"]
for funcname in functions:
getattr(psdf_or_psser, funcname)()
def test_stat_functions(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]})
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
# empty
self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]])
self._test_stat_functions(pdf.loc[[]], psdf.loc[[]])
def test_stat_functions_multiindex_column(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
def test_stat_functions_with_no_numeric_columns(self):
pdf = pd.DataFrame(
{
"A": ["a", None, "c", "d", None, "f", "g"],
"B": ["A", "B", "C", None, "E", "F", None],
}
)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf, psdf)
def test_sum(self):
pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3))
self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum())
self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1))
self.assert_eq(psdf["a"].sum(), pdf["a"].sum())
self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3))
self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3))
self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum())
self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1))
def test_product(self):
pdf = pd.DataFrame(
{"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.product(), pdf.product(), check_exact=False)
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False)
self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product())
self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1))
self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False)
self.assert_eq(
psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False
)
self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3))
self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3))
self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product())
self.assert_eq(
psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1)
)
def test_abs(self):
pdf = pd.DataFrame(
{
"A": [1, -2, np.nan, -4, 5],
"B": [1.0, -2, np.nan, -4, 5],
"C": [-6.0, -7, -8, np.nan, 10],
"D": ["a", "b", "c", "d", np.nan],
"E": [True, np.nan, False, True, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.A.abs(), pdf.A.abs())
self.assert_eq(psdf.B.abs(), pdf.B.abs())
self.assert_eq(psdf.E.abs(), pdf.E.abs())
# pandas' bug?
# self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs())
self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs())
self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs())
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.abs()
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.D.abs()
def test_axis_on_dataframe(self):
# The number of each count is intentionally big
# because when data is small, it executes a shortcut.
# Less than 'compute.shortcut_limit' will execute a shortcut
# by using collected pandas dataframe directly.
# now we set the 'compute.shortcut_limit' as 1000 explicitly
with option_context("compute.shortcut_limit", 1000):
pdf = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5] * 300,
"B": [1.0, -2, 3, -4, 5] * 300,
"C": [-6.0, -7, -8, -9, 10] * 300,
"D": [True, False, True, False, False] * 300,
},
index=range(10, 15001, 10),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.count(axis=1), pdf.count(axis=1))
self.assert_eq(psdf.var(axis=1), pdf.var(axis=1))
self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0))
self.assert_eq(psdf.std(axis=1), pdf.std(axis=1))
self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0))
self.assert_eq(psdf.max(axis=1), pdf.max(axis=1))
self.assert_eq(psdf.min(axis=1), pdf.min(axis=1))
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1))
self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1))
self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1))
self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1))
self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0))
self.assert_eq(
psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True)
)
self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True))
self.assert_eq(
psdf.var(axis=1, ddof=0, numeric_only=True),
pdf.var(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True))
self.assert_eq(
psdf.std(axis=1, ddof=0, numeric_only=True),
pdf.std(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(
psdf.max(axis=1, numeric_only=True),
pdf.max(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.min(axis=1, numeric_only=True),
pdf.min(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.sum(axis=1, numeric_only=True),
pdf.sum(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.product(axis=1, numeric_only=True),
pdf.product(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True)
)
self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True))
self.assert_eq(
psdf.sem(axis=1, ddof=0, numeric_only=True),
pdf.sem(axis=1, ddof=0, numeric_only=True),
)
def test_corr(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# DataFrame
# we do not handle NaNs for now
pdf = makeMissingDataframe(0.3, 42).fillna(0)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_a = pdf.A
pser_b = pdf.B
psser_a = psdf.A
psser_b = psdf.B
self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b))
self.assertRaises(TypeError, lambda: psser_a.corr(psdf))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_xa = pdf[("X", "A")]
pser_xb = pdf[("X", "B")]
psser_xa = psdf[("X", "A")]
psser_xb = psdf[("X", "B")]
self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True)
def test_cov_corr_meta(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
pdf = pd.DataFrame(
{
"a": np.array([1, 2, 3], dtype="i1"),
"b": np.array([1, 2, 3], dtype="i2"),
"c": np.array([1, 2, 3], dtype="i4"),
"d": np.array([1, 2, 3]),
"e": np.array([1.0, 2.0, 3.0], dtype="f4"),
"f": np.array([1.0, 2.0, 3.0]),
"g": np.array([True, False, True]),
"h": np.array(list("abc")),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr())
def test_stats_on_boolean_dataframe(self):
pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.min(), pdf.min())
self.assert_eq(psdf.max(), pdf.max())
self.assert_eq(psdf.count(), pdf.count())
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.product(), pdf.product())
self.assert_eq(psdf.mean(), pdf.mean())
self.assert_eq(psdf.var(), pdf.var(), check_exact=False)
self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False)
self.assert_eq(psdf.std(), pdf.std(), check_exact=False)
self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False)
self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False)
self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False)
def test_stats_on_boolean_series(self):
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(psser.min(), pser.min())
self.assert_eq(psser.max(), pser.max())
self.assert_eq(psser.count(), pser.count())
self.assert_eq(psser.sum(), pser.sum())
self.assert_eq(psser.product(), pser.product())
self.assert_eq(psser.mean(), pser.mean())
self.assert_eq(psser.var(), pser.var(), almost=True)
self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True)
self.assert_eq(psser.std(), pser.std(), almost=True)
self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True)
self.assert_eq(psser.sem(), pser.sem(), almost=True)
self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True)
def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True)
)
self.assert_eq(psdf.count(numeric_only=True), pdf.count(numeric_only=True))
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True))
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int)
)
self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True))
self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.var(ddof=0, numeric_only=True),
pdf.var(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.std(ddof=0, numeric_only=True),
pdf.std(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.sem(ddof=0, numeric_only=True),
pdf.sem(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True)))
self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True)))
self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True)))
# Boolean was excluded because of a behavior change in NumPy
# https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits
# but this behavior is inconsistent in pandas context.
# Boolean column in quantile tests are excluded for now.
# TODO(SPARK-35555): track and match the behavior of quantile to pandas'
pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
len(psdf.quantile(q=0.5, numeric_only=True)),
len(pdf.quantile(q=0.5, numeric_only=True)),
)
self.assert_eq(
len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
)
def test_numeric_only_unsupported(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False)
)
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False),
pdf[["i", "b"]].sum(numeric_only=False).astype(int),
)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.sum(numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.s.sum()
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_stats import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
py | 1a3866c1c1e66c5854e48705f6bef8e561728c4b | '''
Easily rank players or teams using the Elo rating system.
```python
# Example usage:
import elo
r1 = 2400 # Player r1 with rating 2400
r2 = 2000 # Player r2 with rating 2000
# New elo for player r1 after losing to player r2
r1_prime = elo.update(r1, r2, elo.LOSS)
# New elo for player r2 after winning over player r1
r2_prime = elo.update(r2, r1, elo.WON)
```
'''
K_FACTOR = 32  # The K-factor controls how strongly a single match impacts the players’ ratings.
#----- The actual score, used as input for the `S(outcome)` function ------
LOSS = 0.0
TIE = 0.5
WON = 1.0
#---------------------------------------------------------------------
def R(r):
'''
Compute the transformed rating of a player.
'''
return pow(10, r/400)
def E(R1, R2):
'''
Returns the expected score of a transformed rating R1 vs another transformed rating R2.
    The return value is a float strictly between 0 and 1.
'''
return R1/(R1 + R2)
def S(outcome):
'''
    Get the actual score after a match has finished.
'''
return outcome
def r_prime(r1, S1, E1):
'''
    Get the new elo based on the current rating r1, the score S1 for the target player, and
    the expected score of the target player vs another.
'''
return r1 + K_FACTOR * (S1 - E1)
def update(r1, r2, S1):
'''
Get the updated elo (rounded) of a player after a match has finished.
'''
return round(r_prime(r1, S1, E(R(r1), R(r2))))
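# --- Worked example (illustrative numbers, added as a sketch; not part of the original module) ---
# With K_FACTOR = 32, r1 = 2400 and r2 = 2000, E(R(2400), R(2000)) is roughly 0.909,
# so a loss for r1 gives round(2400 + 32 * (0.0 - 0.909)) == 2371, while a win for
# r2 gives round(2000 + 32 * (1.0 - 0.091)) == 2029.
if __name__ == '__main__':
    print(update(2400, 2000, LOSS))  # expected: 2371
    print(update(2000, 2400, WON))   # expected: 2029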
|
py | 1a3866d604e660dd9fb7db8995e8378f43fc6d6a | from typing import Tuple
import re
RESULT_PATTERN = re.compile(r'([^\*]+)(\**)')
def convert_to_stars(t_value: float) -> str:
t = abs(t_value)
if t < 1.645:
return ''
elif t < 1.96:
return '*'
elif t < 2.576:
return '**'
    elif t >= 2.576:
return '***'
else: # covers nan
return ''
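# Usage sketch (illustrative values, added for clarity): convert_to_stars(2.1) returns '**',
# and parse_stars_value('0.052***') below returns ('0.052', '***').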
def parse_stars_value(value: str) -> Tuple[str, str]:
match = RESULT_PATTERN.fullmatch(value)
if not match:
return '', ''
result = match.group(1)
stars = match.group(2)
return result, stars |
py | 1a38685c4299d9636970da7ab8ad7b305964cad0 | from lookup import sds_connection_factory
from lookup.sds_client import SDSClient, SDSMockClient
from utilities import config
from utilities import integration_adaptors_logger as log
from utilities.string_utilities import str2bool
logger = log.IntegrationAdaptorsLogger(__name__)
def get_sds_client():
use_mock = str2bool(config.get_config('MOCK_LDAP_RESPONSE', default=str(False)))
if use_mock:
pause_duration = int(config.get_config('MOCK_LDAP_PAUSE', default="0"))
logger.warning("!!! IMPORTANT !!! Using LDAP mock response with %sms delay", pause_duration)
return SDSMockClient()
else:
sds_connection = sds_connection_factory.create_connection()
search_base = config.get_config("LDAP_SEARCH_BASE")
return SDSClient(sds_connection, search_base)
|
py | 1a386888fa31532f422640d90db7471960a0a105 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test the top-level pydl functions.
"""
import pytest
import numpy as np
try:
from astropy.tests.compat import assert_allclose
except ImportError:
from numpy.testing import assert_allclose
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from ..file_lines import file_lines
from ..median import median
from ..pcomp import pcomp
from ..rebin import rebin
from ..smooth import smooth
from ..uniq import uniq
def test_file_lines():
#
# Find the test files
#
line_numbers = (1, 42, 137)
plainfiles = [get_pkg_data_filename('t/this-file-contains-{0:d}-lines.txt'.format(l)) for l in line_numbers]
gzfiles = [get_pkg_data_filename('t/this-file-contains-{0:d}-lines.txt.gz'.format(l)) for l in line_numbers]
for i, p in enumerate(plainfiles):
n = file_lines(p)
assert n == line_numbers[i]
for i, p in enumerate(gzfiles):
n = file_lines(p, compress=True)
assert n == line_numbers[i]
#
# Test list passing
#
n = file_lines(plainfiles)
assert tuple(n) == line_numbers
n = file_lines(gzfiles, compress=True)
assert tuple(n) == line_numbers
#
# Make sure empty files work
#
n = file_lines(get_pkg_data_filename('t/this-file-is-empty.txt'))
assert n == 0
def test_median():
odd_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
dtype=np.float32)
even_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=np.float32)
assert median(odd_data) == 7
assert median(odd_data, even=True) == 7
assert median(even_data) == 7
assert median(even_data, even=True) == 6.5
assert (median(odd_data, 3) == odd_data).all()
with raises(ValueError):
foo = median(np.ones((9, 9, 9)), 3)
odd_data2 = np.vstack((odd_data, odd_data, odd_data, odd_data, odd_data))
assert (median(odd_data2, 3) == odd_data2).all()
assert (median(odd_data2, axis=0) == odd_data).all()
assert (median(odd_data2, axis=1) ==
7*np.ones((odd_data2.shape[0],), dtype=odd_data2.dtype)).all()
def test_pcomp():
test_data_file = get_pkg_data_filename('t/pcomp_data.txt')
test_data = np.loadtxt(test_data_file, dtype='d', delimiter=',')
with raises(ValueError):
foo = pcomp(np.arange(10))
pcomp_data = test_data[0:20, :]
m = 4
n = 20
means = np.tile(pcomp_data.mean(0), n).reshape(pcomp_data.shape)
newarray = pcomp_data - means
foo = pcomp(newarray, covariance=True)
#
# This array is obtained from the IDL version of PCOMP.
# It is only accurate up to an overall sign on each column.
#
derived = test_data[20:40, :]
for k in range(m):
assert_allclose(abs(foo.derived[:, k]), abs(derived[:, k]), 1e-4)
coefficients = test_data[40:44, :]
coefficientsT = coefficients.T
for k in range(m):
assert_allclose(abs(foo.coefficients[:, k]),
abs(coefficientsT[:, k]),
1e-4)
eigenvalues = test_data[44, :]
assert_allclose(foo.eigenvalues, eigenvalues, 1e-4)
variance = test_data[45, :]
assert_allclose(foo.variance, variance, 1e-4)
#
# Test the standardization.
#
foo = pcomp(pcomp_data, standardize=True, covariance=True)
# for k in range(m):
# assert_allclose(abs(foo.derived[:, k]), abs(derived[:, k]), 1e-4)
# for k in range(m):
# assert_allclose(abs(foo.coefficients[:, k]),
# abs(coefficientsT[:, k]),
# 1e-4)
eigenvalues = test_data[46, :]
assert_allclose(foo.eigenvalues, eigenvalues, 1e-4)
variance = test_data[47, :]
assert_allclose(foo.variance, variance, 1e-4)
# assert_allclose(foo.derived[0, :], np.array([-1.64153312,
# -9.12322038,
# 1.41790708,
# -8.29359322]))
#
# Make sure correlation is working at least.
#
foo = pcomp(pcomp_data, standardize=True)
assert_allclose(foo.eigenvalues, np.array([2.84968632e+00,
1.00127640e+00,
1.48380121e-01,
6.57156222e-04]))
assert_allclose(foo.variance, np.array([7.12421581e-01,
2.50319100e-01,
3.70950302e-02,
1.64289056e-04]))
def test_rebin():
x = np.arange(40)
with raises(ValueError):
r = rebin(x, d=(10, 10))
with raises(ValueError):
r = rebin(x, d=(70,))
with raises(ValueError):
r = rebin(x, d=(30,))
x = np.array([[1.0, 2.0], [2.0, 3.0]])
rexpect = np.array([[1.0, 2.0], [1.5, 2.5], [2.0, 3.0], [2.0, 3.0]])
r = rebin(x, d=(4, 2))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 1.5, 2.0, 2.0], [2.0, 2.5, 3.0, 3.0]])
r = rebin(x, d=(2, 4))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 2.0], [1.0, 2.0], [2.0, 3.0], [2.0, 3.0]])
r = rebin(x, d=(4, 2), sample=True)
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 1.0, 2.0, 2.0], [2.0, 2.0, 3.0, 3.0]])
r = rebin(x, d=(2, 4), sample=True)
assert np.allclose(r, rexpect)
x = np.arange(10)
rexpect = np.array([0.0, 2.0, 4.0, 6.0, 8.0])
r = rebin(x, d=(5,), sample=True)
assert np.allclose(r, rexpect)
x = np.array([[1.0, 2.0, 3.0, 4.0],
[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[4.0, 5.0, 6.0, 7.0]])
rexpect = np.array([[2.0, 4.0], [4.0, 6.0]])
r = rebin(x, d=(2, 2))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 4.5],
[3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 6.5]])
r = rebin(x, d=(2, 8))
assert np.allclose(r, rexpect)
@pytest.mark.xfail
def test_rebin_int():
"""Test rebin on integers. Comparing to IDL code similar to this::
IDL> seed = 100
IDL> array = FIX(RANDOMN(seed, 10, 20) * 100, TYPE=1) ; UINT8
IDL> array_rebin = REBIN(array, 5, 10)
"""
array = np.array([[188, 186, 25, 212, 34, 98, 3, 235, 155, 148],
[107, 166, 4, 41, 101, 190, 39, 154, 153, 239],
[135, 181, 92, 161, 213, 136, 35, 61, 80, 164],
[123, 248, 8, 157, 96, 118, 99, 1, 109, 246],
[226, 71, 183, 27, 46, 99, 8, 239, 66, 25],
[ 27, 219, 37, 130, 5, 81, 65, 250, 96, 14],
[ 71, 157, 156, 136, 47, 225, 247, 191, 49, 12],
[231, 133, 9, 38, 243, 2, 235, 145, 23, 22],
[146, 38, 49, 89, 42, 57, 220, 214, 135, 47],
[101, 116, 122, 209, 141, 37, 158, 224, 245, 82],
[ 15, 47, 51, 250, 207, 193, 209, 228, 110, 1],
[ 59, 232, 216, 224, 24, 118, 190, 10, 107, 27],
[ 84, 193, 112, 206, 113, 171, 138, 117, 244, 20],
[ 5, 31, 128, 214, 200, 119, 59, 27, 57, 10],
[226, 71, 177, 85, 0, 68, 54, 207, 141, 250],
[ 52, 119, 121, 177, 165, 99, 68, 29, 137, 200],
[172, 91, 181, 187, 87, 250, 45, 154, 58, 83],
[ 56, 175, 189, 35, 203, 223, 243, 187, 252, 97],
[186, 172, 207, 128, 61, 231, 89, 57, 131, 222],
[206, 96, 29, 60, 3, 8, 221, 55, 60, 17]], dtype=np.uint8)
array_rebin = np.array([[161, 70, 105, 107, 173],
[171, 104, 140, 49, 149],
[135, 94, 57, 140, 50],
[148, 84, 129, 204, 26],
[100, 117, 69, 204, 127],
[ 88, 185, 135, 159, 61],
[ 78, 165, 150, 85, 82],
[116, 140, 83, 89, 181],
[123, 148, 190, 157, 122],
[165, 105, 75, 105, 107]], dtype=np.uint8)
ar = rebin(array, (10, 5))
assert (array_rebin == ar).all()
def test_rebin_int_sample():
"""Similar to test_rebin_int(), but using the sample option.
"""
array = np.array([[188, 186, 25, 212, 34, 98, 3, 235, 155, 148],
[107, 166, 4, 41, 101, 190, 39, 154, 153, 239],
[135, 181, 92, 161, 213, 136, 35, 61, 80, 164],
[123, 248, 8, 157, 96, 118, 99, 1, 109, 246],
[226, 71, 183, 27, 46, 99, 8, 239, 66, 25],
[ 27, 219, 37, 130, 5, 81, 65, 250, 96, 14],
[ 71, 157, 156, 136, 47, 225, 247, 191, 49, 12],
[231, 133, 9, 38, 243, 2, 235, 145, 23, 22],
[146, 38, 49, 89, 42, 57, 220, 214, 135, 47],
[101, 116, 122, 209, 141, 37, 158, 224, 245, 82],
[ 15, 47, 51, 250, 207, 193, 209, 228, 110, 1],
[ 59, 232, 216, 224, 24, 118, 190, 10, 107, 27],
[ 84, 193, 112, 206, 113, 171, 138, 117, 244, 20],
[ 5, 31, 128, 214, 200, 119, 59, 27, 57, 10],
[226, 71, 177, 85, 0, 68, 54, 207, 141, 250],
[ 52, 119, 121, 177, 165, 99, 68, 29, 137, 200],
[172, 91, 181, 187, 87, 250, 45, 154, 58, 83],
[ 56, 175, 189, 35, 203, 223, 243, 187, 252, 97],
[186, 172, 207, 128, 61, 231, 89, 57, 131, 222],
[206, 96, 29, 60, 3, 8, 221, 55, 60, 17]], dtype=np.uint8)
array_sample = np.array([[188, 25, 34, 3, 155],
[135, 92, 213, 35, 80],
[226, 183, 46, 8, 66],
[ 71, 156, 47, 247, 49],
[146, 49, 42, 220, 135],
[ 15, 51, 207, 209, 110],
[ 84, 112, 113, 138, 244],
[226, 177, 0, 54, 141],
[172, 181, 87, 45, 58],
[186, 207, 61, 89, 131]], dtype=np.uint8)
ars = rebin(array, (10, 5), sample=True)
assert (array_sample == ars).all()
def test_smooth():
test_data_file = get_pkg_data_filename('t/smooth_data.txt')
noise = np.loadtxt(test_data_file, dtype='d')
#
# Test smooth function
#
x = 8.0*np.arange(100)/100.0 - 4.0
y = np.sin(x) + 0.1*noise
s = smooth(y, 5)
assert s.shape == (100,)
s_edge = smooth(y, 5, True)
assert s_edge.shape == (100,)
s_w = smooth(y, 1)
assert (s_w == y).all()
def test_uniq():
items = np.array([1, 2, 3, 1, 5, 6, 1, 7, 3, 2, 5, 9, 11, 1])
items_sorted = np.sort(items)
items_argsorted = np.argsort(items)
#
# Test pre-sorted array.
#
u1 = uniq(items_sorted)
assert (u1 == np.array([3, 5, 7, 9, 10, 11, 12, 13])).all()
#
# Test arg-sorted array.
#
u2 = uniq(items, items_argsorted)
assert (u2 == np.array([13, 9, 8, 10, 5, 7, 11, 12])).all()
assert (items_sorted[u1] == items[u2]).all()
#
# Test degenerate case of all identical items.
#
identical_items = np.ones((10,), dtype=items.dtype)
u = uniq(identical_items)
assert (u == np.array([9])).all()
u = uniq(identical_items, np.arange(10, dtype=items.dtype))
assert (u == np.array([9])).all()
|
py | 1a3868b396029bffb5aa0742660a6d1f3a4a5544 | from flask import Flask, render_template
from flask_pymongo import PyMongo
from .utils import login_required
def create_app(config_file=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile(config_file)
app.mongo = PyMongo(app)
# ToDo: move to own Blueprint
@app.route('/faq')
def faq():
return render_template('faq.html', title='FAQ')
# ToDo: move to own Blueprint
@app.route('/dashboard')
@login_required
def dashboard():
stats = {}
stats['num_orgs'] = app.mongo.db.organisations.find().count()
stats['num_users'] = app.mongo.db.users.find().count()
return render_template('dashboard.html', title='Dashboard', stats=stats)
register_blueprints(app)
return app
def register_blueprints(app):
from scippycrm.organisations import organisations_blueprint
app.register_blueprint(organisations_blueprint)
from scippycrm.users import users_blueprint
app.register_blueprint(users_blueprint) |
py | 1a3869ee93e43f56fea750d7828bfff68820e6ab | """
SQLite3 CRUD with Python 3
@author parzibyte
More tutorials at: parzibyte.me/blog
"""
import sqlite3
try:
    # Connect to the database
bd = sqlite3.connect("libros.db")
cursor = bd.cursor()
    # List the books
sentencia = "SELECT *,rowid FROM libros;"
cursor.execute(sentencia)
libros = cursor.fetchall()
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+{:-<10}+".format("", "", "", "", ""))
print("|{:^20}|{:^20}|{:^10}|{:^50}|{:^10}|".format("Autor", "Género", "Precio", "Título", "Rowid"))
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+{:-<10}+".format("", "", "", "", ""))
for autor, genero, precio, titulo, rowid in libros:
print("|{:^20}|{:^20}|{:^10}|{:^50}|{:^10}|".format(autor, genero, precio, titulo, rowid))
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+{:-<10}+".format("", "", "", "", ""))
    # Ask for the id of the book to edit
id_libro = input("\nEscribe el id del libro que quieres editar: ")
if not id_libro:
print("No escribiste nada")
exit()
    # Ask for the new data
autor = input("\nNuevo autor: ")
genero = input("\nNuevo género: ")
precio = float(input("\nNuevo precio: "))
titulo = input("\nNuevo título: ")
    # Statement to update
sentencia = "UPDATE libros SET autor = ?, genero = ?, precio = ?, titulo = ? WHERE rowid = ?;"
    # Update the data
cursor.execute(sentencia, [autor, genero, precio, titulo, id_libro])
bd.commit()
print("Datos guardados")
except sqlite3.OperationalError as error:
print("Error al abrir:", error) |
py | 1a386a28274d06bb96bd57a19bdd1107e96cd809 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, osv
# We just create a new model
class mother(models.Model):
_name = 'test.inherit.mother'
_columns = {
# check interoperability of field inheritance with old-style fields
'name': osv.fields.char('Name'),
'state': osv.fields.selection([('a', 'A'), ('b', 'B')], string='State'),
}
_defaults = {
'name': 'Foo',
}
surname = fields.Char(compute='_compute_surname')
@api.one
@api.depends('name')
def _compute_surname(self):
self.surname = self.name or ''
# We want to inherits from the parent model and we add some fields
# in the child object
class daughter(models.Model):
_name = 'test.inherit.daughter'
_inherits = {'test.inherit.mother': 'template_id'}
template_id = fields.Many2one('test.inherit.mother', 'Template',
required=True, ondelete='cascade')
field_in_daughter = fields.Char('Field1')
# We add a new field in the parent object. Because of a recent refactoring,
# this feature was broken.
# This test and these models try to show the bug and fix it.
class mother(models.Model):
_inherit = 'test.inherit.mother'
field_in_mother = fields.Char()
# extend the name field: make it required and change its default value
name = fields.Char(required=True, default='Bar')
# extend the selection of the state field
state = fields.Selection(selection_add=[('c', 'C')])
# override the computed field, and extend its dependencies
@api.one
@api.depends('field_in_mother')
def _compute_surname(self):
if self.field_in_mother:
self.surname = self.field_in_mother
else:
super(mother, self)._compute_surname()
class mother(models.Model):
_inherit = 'test.inherit.mother'
# extend again the selection of the state field
state = fields.Selection(selection_add=[('d', 'D')])
class daughter(models.Model):
_inherit = 'test.inherit.daughter'
# simply redeclare the field without adding any option
template_id = fields.Many2one()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
py | 1a386a607464ccc513671ae9f96ab02ec09f8c3a | print("Double Digit Calculator")
num1 = int(input("Num 1: "))
num2 = int(input("Num 2: "))
equation = input("Equation (+, -, *, /)")
if equation == "+":
print(num1 + num2)
elif equation == "-":
print(num1 - num2)
elif equation == "*":
print(num1 * num2)
elif equation == "/":
    print(num1 / num2)
else:
print("SYNTAX ERROR") |
py | 1a386dececc58044096b839d4a168412f59ffff9 | """
:codeauthor: Jayesh Kariya <[email protected]>
"""
import pytest
import salt.states.apache as apache
import salt.utils.files
from tests.support.mock import MagicMock, mock_open, patch
@pytest.fixture
def configure_loader_modules():
return {apache: {}}
def test_configfile():
"""
    Test that allows for inputting a yaml dictionary into a file
for apache configuration files.
"""
with patch("os.path.exists", MagicMock(return_value=True)):
name = "/etc/distro/specific/apache.conf"
config = 'VirtualHost: this: "*:80"'
new_config = 'LiteralHost: that: "*:79"'
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
with patch.object(salt.utils.files, "fopen", mock_open(read_data=config)):
mock_config = MagicMock(return_value=config)
with patch.dict(apache.__salt__, {"apache.config": mock_config}):
ret.update({"comment": "Configuration is up to date."})
assert apache.configfile(name, config) == ret
with patch.object(salt.utils.files, "fopen", mock_open(read_data=config)):
mock_config = MagicMock(return_value=new_config)
with patch.dict(apache.__salt__, {"apache.config": mock_config}):
ret.update(
{
"comment": "Configuration will update.",
"changes": {"new": new_config, "old": config},
"result": None,
}
)
with patch.dict(apache.__opts__, {"test": True}):
assert apache.configfile(name, new_config) == ret
with patch.object(salt.utils.files, "fopen", mock_open(read_data=config)):
mock_config = MagicMock(return_value=new_config)
with patch.dict(apache.__salt__, {"apache.config": mock_config}):
ret.update(
{"comment": "Successfully created configuration.", "result": True}
)
with patch.dict(apache.__opts__, {"test": False}):
assert apache.configfile(name, config) == ret
|
py | 1a386ee00dd8450c82d6284a3c5f4296319aacb6 | import os
from typing import List
import sqlite3
from datamodels import car
def setupNewDB(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isfile(dirpath + "/newdata.db"):
os.remove(dirpath + "/newdata.db")
newdb = sqlite3.connect(dirpath + "/newdata.db")
try:
newdb.execute(
"CREATE TABLE cars(id TEXT, title TEXT, url TEXT, price TEXT, img TEXT, cdata TEXT)"
)
except sqlite3.OperationalError:
print("Error setting up the database")
newdb.close()
quit()
return newdb
def insertResults(db, results):
for res in results:
db.execute(
"INSERT INTO cars VALUES (?,?,?,?,?,?)",
(res.listing_id, res.title, res.url, res.price, res.img, res.data),
)
db.commit()
def findChanges(dirpath, results: List[car]) -> List[car]:
changes = []
newIDs = list(map(lambda newresult: newresult.listing_id, results))
if not os.path.isfile(dirpath + "/data.db"):
changes = list(map(lambda item: item.with_change_reasons("new"), results))
else:
olddb = sqlite3.connect(dirpath + "/data.db")
for currentCar in results:
oldres = olddb.execute(
"SELECT * from cars WHERE id=?", [currentCar.listing_id]
).fetchone()
if oldres is not None:
oldcar = car(*oldres)
if oldcar != currentCar:
changes.append(
currentCar.with_change_reasons(
'changed',
currentCar.diffFromOld(oldcar),
)
)
else:
changes.append(currentCar.with_change_reasons('new'))
oldCarData = olddb.execute("SELECT * from cars").fetchall()
oldCars = list(map(lambda tpl: car(*tpl), oldCarData))
for oldCar in oldCars:
if oldCar.listing_id not in newIDs:
changes.append(oldCar.with_change_reasons("deleted"))
olddb.close()
return changes
def archiveDatabase(dirpath):
if os.path.isfile(dirpath + "/data.db"):
os.remove(dirpath + "/data.db")
os.rename(dirpath + "/newdata.db", dirpath + "/data.db")
|
py | 1a386f49304cc2f57bd14bdd434a3b46e44640bb | from flask import render_template, safe_join, send_file
from flask_login import login_required
from redash import settings
from redash.handlers import routes
from redash.handlers.authentication import base_href
from redash.handlers.base import org_scoped_rule
def render_index():
if settings.MULTI_ORG:
response = render_template("multi_org.html", base_href=base_href())
else:
full_path = safe_join(settings.STATIC_ASSETS_PATH, 'index.html')
response = send_file(full_path, **dict(cache_timeout=0, conditional=True))
return response
@routes.route(org_scoped_rule('/<path:path>'))
@routes.route(org_scoped_rule('/'))
@login_required
def index(**kwargs):
return render_index()
|
py | 1a386f7fbdad0e3aade871ee3f6b9bf46764f8ac | import json
import os
import time
from .baseclient import BaseClient
from .payloads import Payload
from .utils import remove_none
class Presence(BaseClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, pid: int = os.getpid(),
state: str = None, details: str = None,
start: int = None, end: int = None,
large_image: str = None, large_text: str = None,
small_image: str = None, small_text: str = None,
party_id: str = None, party_size: list = None,
               join: str = None, spectate: str = None, buttons: str = None,
match: str = None, instance: bool = True,
_donotuse=True):
if _donotuse is True:
payload = Payload.set_activity(pid, state, details, start, end, large_image, large_text,
small_image, small_text, party_id, party_size, join, spectate,
match, instance, activity=True)
else:
payload = _donotuse
self.send_data(1, payload)
return self.loop.run_until_complete(self.read_output())
def clear(self, pid: int = os.getpid()):
payload = Payload.set_activity(pid, activity=None)
self.send_data(1, payload)
return self.loop.run_until_complete(self.read_output())
def connect(self):
self.update_event_loop(self.get_event_loop())
self.loop.run_until_complete(self.handshake())
def close(self):
self.send_data(2, {'v': 1, 'client_id': self.client_id})
self.sock_writer.close()
self.loop.close()
class AioPresence(BaseClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs, isasync=True)
async def update(self, pid: int = os.getpid(),
state: str = None, details: str = None,
start: int = None, end: int = None,
large_image: str = None, large_text: str = None,
small_image: str = None, small_text: str = None,
party_id: str = None, party_size: list = None,
                     join: str = None, spectate: str = None, buttons: str = None,
match: str = None, instance: bool = True):
payload = Payload.set_activity(pid, state, details, start, end, large_image, large_text,
small_image, small_text, party_id, party_size, join, spectate,
                                       match, instance, buttons, activity=True)
self.send_data(1, payload)
return await self.read_output()
async def clear(self, pid: int = os.getpid()):
payload = Payload.set_activity(pid, activity=None)
self.send_data(1, payload)
return await self.read_output()
async def connect(self):
self.update_event_loop(self.get_event_loop())
await self.handshake()
def close(self):
self.send_data(2, {'v': 1, 'client_id': self.client_id})
self.sock_writer.close()
self.loop.close()
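# Usage sketch (added for illustration; the client id below is a placeholder, not a real Discord application id):
#   rpc = Presence('000000000000000000')
#   rpc.connect()
#   rpc.update(state='Testing', details='pypresence example')
#   rpc.close()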
|
py | 1a386fadac5318cacc2681b14399416ca6694a88 | '''
How to pickle data (any object). Note: to loop over every pickled object in a
file, you must use a while loop that compares the file size to the current byte
location, or else use some kind of exception-based method.
exception-based forloop: https://stackoverflow.com/questions/18675863/load-data-from-python-pickle-file-in-a-loop
'''
import pickle # only works for python3. for py2, need to do other stuff
import numpy as np
import os
print('='*60)
print('example 1: pickle a single object')
a={letter:num for num,letter in enumerate(list('abcdef'))}
fname='delme.pkl'
with open(fname,'wb') as f:
pickle.dump(a,f)
with open(fname,'rb') as f2:
b=pickle.load(f2)
print('original:',a)
print('compare original and loaded:',a==b)
os.remove(fname)
print('file removed. done')
# ==============================================================================
print('='*60)
print('example 2: pickle multiple objects')
temp=[np.arange(i+1) for i in range(0,4)] # get multiple vectors to pickle
print('complete data:')
print(temp)
fname='example.pickle' # note, extension can pretty much be anything you want.
ftemp=open(fname,'wb')
for ivec in temp:
print('dumping:',ivec)
pickle.dump(ivec,ftemp)
ftemp.close()
print('done')
print('=','now, load file and display contents','='*10)
# next, read the file back in
ftemp=open(fname,'rb')
xx=[]
while ftemp.tell()<os.path.getsize('example.pickle'):
xx.append(pickle.load(ftemp,encoding='bytes'))
print('loaded: ',xx[-1])
ftemp.close()
print('complete data:')
print(xx)
os.remove(fname)
print('file removed. done')
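# ==============================================================================
# Example 3 (added sketch): the exception-based alternative mentioned in the
# docstring above -- keep calling pickle.load() until EOFError is raised.
# The file name is assumed to have been written as in example 2.
def load_all_pickles(fname):
    items = []
    with open(fname, 'rb') as f:
        while True:
            try:
                items.append(pickle.load(f))
            except EOFError:
                break
    return items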
# eof
|
py | 1a38700307c1c5bc6808b3b5542f44b525246667 | '''
Support for Tomcat
'''
# Import Python Libs
import os
def __catalina_home():
'''
Tomcat paths differ depending on packaging
'''
locations = ['/usr/share/tomcat6', '/opt/tomcat']
for location in locations:
if os.path.isdir(location):
return location
def version():
'''
Return server version from catalina.sh version
CLI Example::
salt '*' tomcat.version
'''
cmd = __catalina_home() + '/bin/catalina.sh version'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if not line:
continue
if 'Server version' in line:
comps = line.split(': ')
return comps[1]
def fullversion():
'''
Return all server information from catalina.sh version
CLI Example::
salt '*' tomcat.fullversion
'''
cmd = __catalina_home() + '/bin/catalina.sh version'
ret = {}
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if not line:
continue
if ': ' in line:
comps = line.split(': ')
ret[comps[0]] = comps[1]
return ret
def signal(signal=None):
'''
Signals catalina to start, stop, securestart, forcestop.
CLI Example::
salt '*' tomcat.signal start
'''
valid_signals = {'forcestop': 'stop -force',
'securestart': 'start -security',
'start': 'start',
'stop': 'stop'}
    if signal not in valid_signals:
return
cmd = '{0}/bin/catalina.sh {1}'.format(
__catalina_home(), valid_signals[signal]
)
__salt__['cmd.run'](cmd)
|
py | 1a38705f1e81d94f27da81d774b5646461d1f3e7 | """
This module demonstrates BUILDING-UP a new SEQUENCE,
one item at a time, using the ACCUMULATOR pattern.
-- We will later see a more efficient way to build-up and/or modify
sequences, namely by MUTATING their elements.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Jacob Tebbe.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# -----------------------------------------------------------------------------
# DONE: 2. READ the program below and RUN it.
#
# When you have read it, asking questions as needed,
# and you feel that you understand,
# for each of LISTS, STRINGS and TUPLES:
# -- HOW to BUILD UP them, using the ACCUMULATOR pattern.
# then:
# change the above _TODO_ to DONE.
# -----------------------------------------------------------------------------
def main():
"""
Demonstrates building sequences by using the Accumulator pattern.
"""
print()
print('-----------------------------------------------------------')
print('Build and then print a LIST:')
print('-----------------------------------------------------------')
build_list()
print()
print('-----------------------------------------------------------')
print('Build and then print a TUPLE:')
print('-----------------------------------------------------------')
build_tuple()
print()
print('-----------------------------------------------------------')
print('Build and then print a STRING:')
print('-----------------------------------------------------------')
build_string()
def build_list():
"""
Demonstrates building a new LIST by using the Accumulator pattern.
We will later see a more efficient way to build/modify lists,
namely, by mutating the elements of the list.
"""
# -------------------------------------------------------------------------
# Here is the Accumulator pattern for building up LISTs:
#
# 1. BEFORE the loop, initialize the list variable
# (the "accumulator") to the empty list [].
#
# 2. LOOP, appending items one at a time (each time thru the loop)
#
# 3. INSIDE the loop:
#
# a. Use + to concatenate:
# -- the existing list, and (followed by)
# -- the one-element list containing the new item
# thus constructing a new list with the new item appended.
#
# b. Re-assign the list variable to the NEW list.
#
# 4. AFTER the loop, the variable is the entire "built up" list.
# -------------------------------------------------------------------------
""" This example builds (and then prints) the LIST
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81] """
sequence = []
for k in range(10):
sequence = sequence + [k ** 2]
print(sequence)
def build_tuple():
"""
Demonstrates building a TUPLE by using the Accumulator pattern.
-- A tuple is just like a list except:
1. It is IMMUTABLE, which means that its elements cannot be
changed (more on that later), and
2. Its notation uses ()s instead of []s. Also,
a one-element tuple requires a COMMA after the item.
"""
# -------------------------------------------------------------------------
# The Accumulator pattern for building up TUPLEs
# is the same as for LISTs except:
# -- Initialize the list variable (the "accumulator")
# to the empty TUPLE () instead of the empty LIST [].
# -- Concatenate the one-element TUPLE: (blah,)
# instead of the one-element LIST: [blah]
# NOTE the COMMA required for a one-element tuple.
# -------------------------------------------------------------------------
""" This example builds (and then prints) the TUPLE
(0, 1, 4, 9, 16, 25, 36, 49, 64, 81) """
sequence = ()
for k in range(10):
sequence = sequence + (k ** 2,)
print(sequence)
def build_string():
"""
Demonstrates building a STRING by using the Accumulator pattern.
We will later see a more efficient way to build/modify strings,
namely, by using the split/join methods.
"""
# -------------------------------------------------------------------------
# The Accumulator pattern for building up STRINGs
# is the same as for LISTs except:
# -- Initialize the list variable (the "accumulator")
# to the empty STRING '' instead of the empty LIST [].
# -- Concatenate the one (or more) element STRING: 'blah'
# instead of the one-element LIST: [blah]
#
# The built-in str function returns a string version
# of its argument.
# -------------------------------------------------------------------------
""" This example builds (and then prints) the STRING
0 1 4 9 16 25 36 49 64 81 """
sequence = ''
for k in range(10):
sequence = sequence + str(k ** 2) + ' '
print(sequence)
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
py | 1a3870a91a6f88779b6386b8ab8e64b9560ddd4f | from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.tdtrawio import TdtRawIO
class TdtIO(TdtRawIO, BaseFromRaw):
"""
    Class for reading data from the Tucker Davis TTank format.
Terminology:
TDT holds data with tanks (actually a directory). And tanks hold sub blocks
(sub directories).
Tanks correspond to Neo Blocks and TDT blocks correspond to Neo Segments.
"""
_prefered_signal_group_mode = 'group-by-same-units'
mode = 'dir'
def __init__(self, dirname):
TdtRawIO.__init__(self, dirname=dirname)
BaseFromRaw.__init__(self, dirname)
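# Usage sketch (added for illustration; the tank path is a placeholder and read_block()
# is assumed to be provided by BaseFromRaw):
#   reader = TdtIO(dirname='path/to/my_tank')
#   block = reader.read_block()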
|
py | 1a3872008dc219a26b740689445ccdc981b244b9 | DEFAULT_HEADERS: dict = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,applica'
'tion/signed-exchange;v=b3;q=0.9',
'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',
'referer': 'https://www.estantevirtual.com.br/livreiros/',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/'
'537.36'
}
DEFAULT_URL_BASE: str = 'https://www.estantevirtual.com.br'
DEFAULT_WAIT_REQUEST: int = 1
DEFAULT_FILTER_BY_ATTRS: list = ['bookName', 'author', 'releaseYear', 'publishingCompany', 'type', 'weight']
DEFAULT_OFFSET: int = 1
class Language:
PORTUGUESE: str = 'Portugu%C3%AAs'
|
py | 1a3872083131ec0264c8e5a9b4081ceec4c5c51d | '''
This file contains various useful statistical methods
'''
import numpy as np
#from lightcone import _get_slice
def skewness(x):
'''
Calculate the skewness of an array.
Note that IDL calculates the skewness in a slightly different way than Python.
This routine uses the IDL definition.
Parameters:
x (ndarray): The array containing the input data.
Returns:
The skewness.
'''
mx = np.mean(x)
n = np.size(x)
xdiff = x-mx
#return (sum(xdiff**3)/n)/((sum(xdiff**2)/n)**(3./2.)) #This is how SciPy does it
return (np.sum(xdiff**3)/n)/((np.sum(xdiff**2)/(n-1))**(3./2.))
def kurtosis(x):
'''
Calculate the kurtosis of an array.
It uses the definition given in Ross et al. (2017).
Parameters:
x (ndarray): The array containing the input data
Returns:
The kurtosis.
'''
mx = np.mean(x)
n = np.size(x)
xdiff = x-mx
#return (sum(xdiff**3)/n)/((sum(xdiff**2)/n)**(3./2.)) #This is how SciPy does it
return (np.sum(xdiff**4)/n)/((np.sum(xdiff**2)/(n-1))**(2.))
def mass_weighted_mean_xi(xi, rho):
''' Calculate the mass-weighted mean ionization fraction.
Parameters:
xi (ndarray): the ionized fraction
rho (ndarray): the density (arbitrary units)
Returns:
The mean mass-weighted ionized fraction.
'''
xi = xi.astype('float64')
rho = rho.astype('float64')
return np.mean(xi*rho)/np.mean(rho)
def subtract_mean_signal(signal, los_axis=2):
'''
Subtract the mean of the signal along the los axis.
Parameters:
signal (ndarray): the signal to subtract the mean from
los_axis (int): the line-of-sight axis (Default: 2)
Returns:
The signal with the mean subtracted
TODO:vectorize
'''
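    # Vectorized equivalent of the loop below (a sketch for the TODO above, assuming a
    # 3D array; negative los_axis values are normalized with the modulo):
    #   axes = tuple(i for i in range(3) if i != los_axis % 3)
    #   return signal - signal.mean(axis=axes, keepdims=True)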
signal_out = signal.copy()
for i in range(signal.shape[los_axis]):
if los_axis in [0,-3]:
signal_out[i,:,:] -= signal[i,:,:].mean()
if los_axis in [1,-2]:
signal_out[:,i,:] -= signal[:,i,:].mean()
if los_axis in [2,-1]:
signal_out[:,:,i] -= signal[:,:,i].mean()
return signal_out
def signal_overdensity(signal, los_axis):
'''
Divide by the mean of the signal along the los axis and subtract one.
Parameters:
signal (ndarray): the signal to subtract the mean from
los_axis (int): the line-of-sight axis
Returns:
The signal with the mean subtracted
TODO:vectorize
'''
signal_out = signal.copy()
for i in range(signal.shape[los_axis]):
if los_axis == 0:
signal_out[i,:,:] /= signal[i,:,:].mean()
if los_axis == 1:
signal_out[:,i,:] /= signal[:,i,:].mean()
if los_axis == 2:
signal_out[:,:,i] /= signal[:,:,i].mean()
return signal_out - 1.
def apply_func_along_los(signal, func, los_axis):
'''
Apply a function, such as np.var() or np.mean(), along
the line-of-sight axis of a signal on a
per-slice basis.
Parameters:
signal (ndarray): the signal
func (callable): the function to apply
los_axis (int): the line-of-sight axis
Returns:
An array of length signal.shape[los_axis]
Example:
Calculate the variance of a lightcone along the
line-of-sight:
>>> lightcone = t2c.read_cbin('my_lightcone.cbin')
>>> dT_var = t2c.apply_func_along_los(lightcone, np.var, 2)
'''
assert los_axis >= 0 and los_axis < len(signal.shape)
output = np.zeros(signal.shape[los_axis])
for i in range(len(output)):
signal_slice = _get_slice(signal, i, los_axis)
output[i] = func(signal_slice)
return output
def _get_slice(data, idx, los_axis, slice_depth=1):
'''
Slice a data cube along a given axis. For internal use.
'''
assert len(data.shape) == 3 or len(data.shape) == 4
assert los_axis >= 0 and los_axis < 3
idx1 = idx
idx2 = idx1+slice_depth
if len(data.shape) == 3: #scalar field
if los_axis == 0:
return np.squeeze(data[idx1:idx2,:,:])
elif los_axis == 1:
return np.squeeze(data[:,idx1:idx2,:])
return np.squeeze(data[:,:,idx1:idx2])
else: #Vector field
if los_axis == 0:
return np.squeeze(data[:,idx1:idx2,:,:])
elif los_axis == 1:
return np.squeeze(data[:,:,idx1:idx2,:])
return np.squeeze(data[:,:,:,idx1:idx2])
|
py | 1a387242693a78b2b3479ed3496c6ab7c078dfa5 |
class Sibling:
pass
|
py | 1a3872ad06291896a916f0b9c6b47a54cb9fe182 | import os
import sys
import subprocess
from tqdm import tqdm
from Bio.Seq import Seq
from Bio import SeqIO, SearchIO
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastpCommandline
from src.python.preprocess2 import *
from itertools import cycle
import matplotlib.pyplot as plt
from pymongo import MongoClient
from tempfile import gettempdir
tmp_dir = gettempdir()
from concurrent.futures import ThreadPoolExecutor
import argparse
ASPECT = 'F'
ONTO = None
PRIOR = None
THRESHOLDS = np.arange(.05, 1, .05)
cleanup = True
eps = 10e-6
def init_GO(asp=ASPECT, src=None):
global ONTO, ASPECT
if src: set_obo_src(src)
ASPECT = asp
ONTO = get_ontology(asp)
return ONTO
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB")
def load_all_data():
mf, _ = load_data(db, asp='F', codes=exp_codes)
cc, _ = load_data(db, asp='C', codes=exp_codes)
bp, _ = load_data(db, asp='P', codes=exp_codes)
return mf, cc, bp
def _prepare_naive(reference):
global PRIOR
prior_pth = os.path.join(tmp_dir, 'prior-%s.npy' % GoAspect(ASPECT))
if os.path.exists(prior_pth):
        PRIOR = np.load(prior_pth).item()
        return  # reuse the cached prior instead of recomputing and overwriting it
go2count = {}
for _, go_terms in reference.items():
for go in go_terms:
if go in go2count:
go2count[go] += 1
else:
go2count[go] = 1
total = len(reference)
prior = {go: count/total for go, count in go2count.items()}
np.save(prior_pth, prior)
PRIOR = prior
def _naive(target, reference):
global PRIOR
if not PRIOR:
_prepare_naive(reference)
return PRIOR
def _prepare_blast(sequences):
# print('### entering _prepare_blast')
blastdb_pth = os.path.join(tmp_dir, 'blast-%s' % GoAspect(ASPECT))
records = [SeqRecord(Seq(seq), id) for id, seq in sequences.items()]
SeqIO.write(records, open(blastdb_pth, 'w+'), "fasta")
os.system("makeblastdb -in %s -dbtype prot" % blastdb_pth)
def parallel_blast(targets, reference, num_cpu=4):
blastdb_pth = os.path.join(tmp_dir, 'blast-%s' % GoAspect(ASPECT))
records = [SeqRecord(Seq(seq), id) for id, seq in reference.items()]
SeqIO.write(records, open(blastdb_pth, 'w+'), "fasta")
os.system("makeblastdb -in %s -dbtype prot" % blastdb_pth)
predictions = dict()
e = ThreadPoolExecutor(num_cpu)
def _parallel_blast_helper(s):
return s[0], _blast(SeqRecord(Seq(s[1]), s[0]), reference, topn=None, choose_max_prob=True)
pbar = tqdm(range(len(targets)), desc="blast2go processed")
for tgtid, preds in e.map(_parallel_blast_helper, targets.items()):
predictions[tgtid] = preds
pbar.update(1)
pbar.close()
return predictions
def _blast(target_fasta, reference, topn=None, choose_max_prob=True):
seqid, asp = target_fasta.id, GoAspect(ASPECT)
query_pth = os.path.join(tmp_dir, "%s-%s.fas" % (seqid, asp))
output_pth = os.path.join(tmp_dir, "%s-%s.out" % (seqid, asp))
database_pth = os.path.join(tmp_dir, 'blast-%s' % asp)
SeqIO.write(target_fasta, open(query_pth, 'w+'), "fasta")
cline = NcbiblastpCommandline(query=query_pth, db=database_pth, out=output_pth,
outfmt=5, evalue=0.001, remote=False, ungapped=False)
child = subprocess.Popen(str(cline),
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform != "win32"))
handle, _ = child.communicate()
assert child.returncode == 0
blast_qresult = SearchIO.read(output_pth, 'blast-xml')
annotations = {}
for hsp in blast_qresult.hsps[:topn]:
if hsp.hit.id == seqid:
continue
ident = hsp.ident_num / hsp.hit_span
for go in reference[hsp.hit.id]:
if go in annotations:
annotations[go].append(ident)
else:
annotations[go] = [ident]
for go, ps in annotations.items():
if choose_max_prob:
annotations[go] = max(ps)
else:
annotations[go] = 1 - np.prod([(1 - p) for p in ps])
if cleanup:
os.remove(query_pth)
os.remove(output_pth)
return annotations
def _predict(reference_annots, target_seqs, func_predict, binary_mode=False):
if len(target_seqs) > 1:
pbar = tqdm(range(len(target_seqs)), desc="targets processed")
else:
pbar = None
if binary_mode:
predictions = np.zeros((len(target_seqs), len(ONTO.classes)))
for i, (_, seq) in enumerate(target_seqs.items()):
preds = func_predict(seq, reference_annots)
bin_preds = ONTO.binarize([list(preds.keys())])[0]
for go, prob in preds.items():
bin_preds[ONTO[go]] = prob
predictions[i, :] = bin_preds
if pbar: pbar.update(1)
else:
predictions = {}
for _, (seqid, seq) in enumerate(target_seqs.items()):
predictions[seqid] = func_predict(SeqRecord(Seq(seq), seqid), reference_annots)
if pbar: pbar.update(1)
if pbar: pbar.close()
return predictions
def bin2dict(distribution, classes):
return {classes[i]: prob for i, prob in enumerate(distribution)}
def get_P_and_T_from_dictionaries(tau, predictions, targets):
assert len(predictions) == len(targets)
P, T = [], []
for seqid, seq_targets in targets.items():
assert len(seq_targets) > 0
seq_preds = predictions[seqid]
seq_annots = [go for go, prob in seq_preds.items() if prob >= tau]
P.append(set(seq_annots))
T.append(set(seq_targets))
assert len(P) == len(T)
return P, T
def get_P_and_T_from_arrays(tau, predictions, targets, classes):
assert len(predictions) == len(targets)
P, T = [], []
classes_arr = np.asarray(classes)
for prob_arr in map(lambda p: np.asarray(p), predictions):
annots = classes_arr[prob_arr >= tau]
P.append(set(annots))
for prob_arr in map(lambda t: np.asarray(t), targets):
annots = classes_arr[prob_arr == 1.0]
assert len(annots) == sum(prob_arr)
T.append(set(annots))
assert len(P) == len(T)
return P, T
def precision(tau, predictions, targets, classes=None):
assert type(predictions) == type(targets)
if isinstance(predictions, dict):
P, T = get_P_and_T_from_dictionaries(tau, predictions, targets)
else:
assert classes
P, T = get_P_and_T_from_arrays(tau, predictions, targets, classes)
ret = [(len(P_i & T_i) / len(P_i)) if len(P_i) else 1.0 for P_i, T_i in zip(P, T)]
return ret
def recall(tau, predictions, targets, classes=None, partial_evaluation=False):
assert type(predictions) == type(targets)
if isinstance(predictions, dict):
P, T = get_P_and_T_from_dictionaries(tau, predictions, targets)
else:
assert classes
P, T = get_P_and_T_from_arrays(tau, predictions, targets, classes)
if partial_evaluation:
P, T = zip(*[(P_i, T_i) for P_i, T_i in zip(P, T) if len(P_i) > 0])
ret = [(len(P_i & T_i) / len(T_i)) if len(P_i) else 0.0 for P_i, T_i in zip(P, T)]
return ret
def F_beta(pr, rc, beta=1):
pr = max(pr, eps)
rc = max(rc, eps)
return (1 + beta ** 2) * ((pr * rc) / (((beta ** 2) * pr) + rc))
def F1(pr, rc):
return F_beta(pr, rc, beta=1)
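# Worked example (illustrative): with precision 0.5 and recall 0.25,
# F1 = 2 * (0.5 * 0.25) / (0.5 + 0.25) = 1/3, so F1(0.5, 0.25) returns ~0.333.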
def predict(reference_seqs, reference_annots, target_seqs, method, basename=""):
filename = "%s_%s.npy" % (method, basename)
if method == "blast":
pred_path = os.path.join(tmp_dir, filename)
if basename and os.path.exists(pred_path):
return np.load(pred_path).item()
_prepare_blast(reference_seqs)
predictions = _predict(reference_annots, target_seqs, _blast)
np.save(pred_path, predictions)
return predictions
elif method == "naive":
_prepare_naive(reference_annots)
predictions = _predict(reference_annots, target_seqs, _naive)
return predictions
elif method == "deepseq":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
elif method == "seq2go":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
elif method == "seq2go-proba":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
else:
print("Unknown method")
def performance(predictions, ground_truth, classes=None, ths=THRESHOLDS):
prs, rcs, f1s = [], [], []
for tau in ths:
pr_per_seq = precision(tau, predictions, ground_truth, classes)
rc_per_seq = recall(tau, predictions, ground_truth, classes)
pr_tau = np.mean(pr_per_seq)
rc_tau = np.mean(rc_per_seq)
prs.append(pr_tau)
rcs.append(rc_tau)
f1s.append(np.mean(F1(pr_tau, rc_tau)))
return ths, prs, rcs, f1s
def plot_precision_recall(perf):
# Plot Precision-Recall curve
lw, n = 2, len(perf)
methods = list(perf.keys())
prs = [v[1] for v in perf.values()]
rcs = [v[2] for v in perf.values()]
f1s = [v[3] for v in perf.values()]
colors = cycle(['red', 'blue', 'navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
# Plot Precision-Recall curve for each class
plt.clf()
for i, color in zip(range(len(methods)), colors):
plt.plot(rcs[i], prs[i], color=color, lw=lw,
label='{0} (F_max = {1:0.2f})'
.format(methods[i], max(f1s[i])))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title(GoAspect(ASPECT))
plt.legend(loc="lower right")
plt.show()
def evaluate_performance(db, methods, asp='F', train_and_validation_data=None, filename=None, plot=1):
onto = init_GO(asp)
if train_and_validation_data:
seqs_train, annots_train, seqs_valid, annots_valid = train_and_validation_data
else:
seqs_train, annots_train, seqs_valid, annots_valid = load_training_and_validation(db, None)
annots_train = propagate_labels(annots_train, onto, include_root=False)
annots_valid = propagate_labels(annots_valid, onto, include_root=False)
perf = {}
for meth in methods:
pred = predict(seqs_train, annots_train, seqs_valid, meth, filename)
perf[meth] = performance(pred, annots_valid)
if plot == 1:
plot_precision_recall(perf)
return pred, perf
def product_of_experts(*predictions):
def go2p2go2ps(go2p_arr):
go2ps = dict()
for go2p in go2p_arr:
for go, prob in go2p.items():
if go in go2ps:
go2ps[go].append(prob)
else:
go2ps[go] = [prob]
return go2ps
poe = dict()
for pred in predictions:
for seqid, go2prob in pred.items():
if seqid in poe:
poe[seqid].append(pred[seqid])
else:
poe[seqid] = [pred[seqid]]
for seqid, arr in poe.items():
poe[seqid] = go2p2go2ps(arr)
for seqid, go2prob in poe.items():
for go, ps in go2prob.items():
poe[seqid][go] = 1 - np.prod([(1 - p) for p in ps])
return poe
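# Worked example (illustrative): if two predictors assign probabilities 0.6 and 0.5 to
# the same GO term for a sequence, the combined score is 1 - (1 - 0.6) * (1 - 0.5) = 0.8
# (a noisy-OR style combination of the individual predictions).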
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
client = MongoClient(args.mongo_url)
db = client['prot2vec']
lim = 100
init_GO(ASPECT)
t0 = datetime(2017, 1, 1, 0, 0)
t1 = datetime.utcnow()
seqs_train, annots_train, seqs_valid, annots_valid = load_training_and_validation(db, t0, t1, ASPECT, lim)
predictions_blast = predict(seqs_train, annots_train, seqs_valid, "blast")
ths, prs, rcs, f1s = performance(predictions_blast, annots_valid)
import json
print(json.dumps(predictions_blast, indent=1))
print(json.dumps(annots_valid, indent=1))
import pandas as pd
print(pd.DataFrame({"Threshold": ths, "Precision": prs, "Recall": rcs, "F1": f1s}).head(20))
print(len(seqs_train), len(seqs_valid), len(predictions_blast))
|
py | 1a3872f1768f4c6b02a62b5fd2163e48f689e863 | import csv
from datetime import datetime, time
from decimal import Decimal
from openpyxl import load_workbook, Workbook
from employee.models import Employee
from .models import MaximoTicket, MaximoTimeRegister
import logging
logger = logging.getLogger(__name__)
__author__ = 'lberrocal'
def row_to_dictionary(excel_row, mappings):
data = dict()
for attribute, position in mappings.items():
data[attribute] = excel_row[position].value
return data
def parse_hours(str_hours):
parts = str_hours.split(':')
return Decimal(parts[0]) + Decimal(parts[1]) / 60
def parse_datetime_hours(hours):
return Decimal(hours.hour + hours.minute / 60.0)
def decimal_to_time(decimal_hours):
hour = int(decimal_hours)
minute = int((decimal_hours - int(decimal_hours)) * Decimal(60.0))
return time(hour, minute, 0)
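# Round-trip example (illustrative): parse_hours('7:30') gives Decimal('7.5'), and
# decimal_to_time(Decimal('7.5')) gives datetime.time(7, 30).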
class AbstractMaximoData(object):
LOAD_TICKETS = 'LOAD_TICKETS'
LOAD_TIME = 'LOAD_TIME'
LOAD_ALL = 'LOAD_ALL'
def __init__(self, stdout=None):
self.ticket_mappings = {'ticket_type': 0, 'number': 1, 'name': 2}
self.time_register_mappings = {'company_id': 0,
'regular_hours': 1,
'date': 3,
'username': 5,
'pay_rate': 7,
'wo_number': 8,
'ticket_type': 11,
'ticket_number': 12,
'description': 6}
self.stdout = stdout
def write(self, msg):
if self.stdout:
self.stdout.write(msg)
def _get_maximo_ticket_info(self, row):
ticket_type = row[self.time_register_mappings['ticket_type']]
if not isinstance(ticket_type, str):
ticket_type = ticket_type.value
if ticket_type not in [MaximoTicket.MAXIMO_SR]:
ticket_type = MaximoTicket.MAXIMO_WORKORDER
if ticket_type == MaximoTicket.MAXIMO_WORKORDER:
number = row[self.time_register_mappings['wo_number']]
else:
number = row[self.time_register_mappings['ticket_number']]
if not isinstance(number, str):
number = number.value
return ticket_type, number
class MaximoCSVData(AbstractMaximoData):
def _parse_date(self, str_date):
return datetime.strptime(str_date, '%b %d, %Y').date()
def load_time_registers(self, filename):
time_results = {'rows_parsed': 0,
'created': 0,
'duplicates': 0,
'sheet': 'NA',
'errors': list()}
row_num = 1
created_count = 0
updated = 0
duplicate_count = 0
errors = list()
with open(filename, 'r', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
next(csv_reader, None)
for row in csv_reader:
attributes = dict()
company_id = row[self.time_register_mappings['company_id']]
try:
attributes['employee'] = Employee.objects.get(company_id=company_id)
attributes['date'] = self._parse_date(row[self.time_register_mappings['date']])
regular_hours = parse_hours(row[self.time_register_mappings['regular_hours']])
if regular_hours > 8.0:
raise ValueError(
                            'Regular hours cannot exceed 8 hours. You are trying to add %.1f hours' % regular_hours)
register_summary = MaximoTimeRegister.objects.get_employee_total_regular_hours(**attributes)
total_regular_hours = 0
if register_summary['total_regular_hours'] is not None:
total_regular_hours = register_summary['total_regular_hours']
if total_regular_hours + regular_hours <= 8.0:
attributes['pay_rate'] = Decimal(row[self.time_register_mappings['pay_rate']])
ticket_type, number = self._get_maximo_ticket_info(row)
attributes['ticket'] = MaximoTicket.objects.get(ticket_type=ticket_type, number=number)
attributes['regular_hours'] = regular_hours
attributes['defaults'] = {'description': row[self.time_register_mappings['description']]}
register, created = MaximoTimeRegister.objects.get_or_create(**attributes)
if created:
created_count += 1
else:
msg = 'Data on row %d for employee %s ' \
'seems to be duplicated for record %d' % (row_num, attributes['employee'],
register.pk)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Possible duplicate',
'message': msg}
errors.append(error)
duplicate_count += 1
else:
msg = 'Data on row %d for employee %s exceeds ' \
                              'the maximum regular hours. It would end up having %.1f hours' % (row_num,
attributes['employee'],
total_regular_hours + regular_hours)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Exceed maximum 8 regular hours',
'message': msg}
errors.append(error)
duplicate_count += 1
except Employee.DoesNotExist:
username = row[self.time_register_mappings['username']]
msg = 'Employee with id %s and username %s ' \
                          'on row %d does not exist; time register was not loaded' % (company_id, username, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Employee does not exist',
'message': msg}
errors.append(error)
except MaximoTicket.DoesNotExist:
msg = '%s with number %s on line %d does not exist' % (ticket_type, number, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Ticket does not exist',
'message': msg}
errors.append(error)
except TypeError as te:
msg = 'Unexpected error %s on row %d' % (te, row_num)
logger.error(msg)
error = {'row_num': row_num,
                             'type': 'Unexpected Type Error',
'message': msg}
errors.append(error)
except ValueError as ve:
msg = '%s on row %d' % (ve, row_num)
logger.error(msg)
error = {'row_num': row_num,
'type': 'Value Error',
'message': msg}
errors.append(error)
row_num += 1
time_results['rows_parsed'] = row_num - 1
time_results['created'] = created_count
time_results['duplicates'] = duplicate_count
time_results['errors'] = errors
return time_results
class MaximoExcelData(AbstractMaximoData):
'''
    The loading of Times is based on an export of a report named TINO-NS-FY16.
The columns are:
0 acp_empnum company_id
1 acp_hours regular_hours
2 acp_pagorelevo
3 acp_timingdate date
4 enterby
5 laborcode username
6 memo description
7 payrate pay_rate
8 refwo wo_number
9 regularhrs
10 skilllevel
11 ticketclass ticket_type
12 ticketid ticket_number
'''
def __init__(self, stdout=None):
super(MaximoExcelData, self).__init__(stdout=stdout)
self.ticket_sheet = 'Maximo Tickets'
self.time_sheet = 'Time'
def load(self, filename, action=AbstractMaximoData.LOAD_ALL, allow_update=False, **kwargs):
wb = load_workbook(filename=filename, data_only=True)
ticket_results = dict()
time_results = dict()
if action == self.LOAD_TICKETS:
ticket_results = self.load_tickets(wb, allow_update=allow_update, **kwargs)
elif action == self.LOAD_TIME:
time_results = self.load_time_registers(wb, allow_update=allow_update, **kwargs)
elif action is self.LOAD_ALL:
ticket_results = self.load_tickets(wb, allow_update=allow_update, **kwargs)
time_results = self.load_time_registers(wb, allow_update=allow_update, **kwargs)
else:
raise ValueError('"%s" is an invalid action for load' % action)
return {'ticket_results': ticket_results,
'time_results': time_results}
def save_tickets(self, filename, tickets):
wb = Workbook()
sheet = wb.create_sheet(title=self.ticket_sheet)
row = 1
for v, column in self.ticket_mappings.items():
sheet.cell(column=column + 1, row=row, value=v.upper())
row += 1
for ticket in tickets:
for v, column, in self.ticket_mappings.items():
sheet.cell(column=column + 1, row=row, value=getattr(ticket, v))
row += 1
wb.save(filename)
def export_time_registers(self, filename, registers):
wb = Workbook()
sheet = wb.create_sheet(title=self.time_sheet)
row = 1
headers = ['Company Id', 'Username', 'Date', 'Hours', 'Pay Rate',
'Ticket Type', 'Ticket Number', 'Ticket Name', 'Memo', 'Project', 'Project Source']
column = 1
for header in headers:
sheet.cell(column=column, row=row, value=header)
column += 1
for register in registers:
row += 1
column = 1
sheet.cell(column=column, row=row, value=register.employee.company_id)
column += 1
sheet.cell(column=column, row=row, value=register.employee.user.username)
column += 1
sheet.cell(column=column, row=row, value=register.date)
column += 1
sheet.cell(column=column, row=row, value=register.regular_hours)
column += 1
sheet.cell(column=column, row=row, value=register.pay_rate)
column += 1
sheet.cell(column=column, row=row, value=register.ticket.ticket_type)
column += 1
sheet.cell(column=column, row=row, value=register.ticket.number)
column += 1
sheet.cell(column=column, row=row, value=register.ticket.name)
column += 1
sheet.cell(column=column, row=row, value=register.description)
column += 1
if register.ticket.project:
project_name = register.ticket.project.short_name
else:
                project_name = ''
sheet.cell(column=column, row=row, value=project_name)
column += 1
sheet.cell(column=column, row=row, value='NA')
wb.save(filename)
def save_time_registers(self, filename, registers):
"""
        Saves a queryset of MaximoTimeRegister objects to an excel format that matches the load file format. The load
        file format is explained in the class-level documentation.
:param filename: Excel filename to save the MaximoTimeRegister
:param registers: QuerySet of MaximoTimeRegister
:return: None
"""
wb = Workbook()
sheet = wb.create_sheet(title=self.time_sheet)
row = 1
for v, column in self.time_register_mappings.items():
sheet.cell(column=column + 1, row=row, value=v.upper())
row += 1
for register in registers:
col = self.time_register_mappings['company_id'] + 1
sheet.cell(column=col, row=row, value=register.employee.company_id)
col = self.time_register_mappings['regular_hours'] + 1
hours = decimal_to_time(register.regular_hours)
sheet.cell(column=col, row=row, value=hours)
col = self.time_register_mappings['date'] + 1
sheet.cell(column=col, row=row, value=register.date)
col = self.time_register_mappings['username'] + 1
sheet.cell(column=col, row=row, value=register.employee.user.username)
col = self.time_register_mappings['pay_rate'] + 1
sheet.cell(column=col, row=row, value=register.pay_rate)
col = self.time_register_mappings['description'] + 1
sheet.cell(column=col, row=row, value=register.description)
if register.ticket.ticket_type == MaximoTicket.MAXIMO_WORKORDER:
col = self.time_register_mappings['wo_number'] + 1
sheet.cell(column=col, row=row, value=register.ticket.number)
if register.ticket.ticket_type != MaximoTicket.MAXIMO_WORKORDER:
col = self.time_register_mappings['ticket_type'] + 1
sheet.cell(column=col, row=row, value=register.ticket.ticket_type)
col = self.time_register_mappings['ticket_number'] + 1
sheet.cell(column=col, row=row, value=register.ticket.number)
row += 1
wb.save(filename)
def load_time_registers(self, wb, allow_update=False, **kwargs):
sheet_name = kwargs.get('Time', self.time_sheet)
time_sheet = wb[sheet_name]
time_results = {'rows_parsed': 0,
'created': 0,
'duplicates': 0,
'sheet': sheet_name,
'errors': list()}
row_num = 1
created_count = 0
updated = 0
duplicate_count = 0
errors = list()
for row in time_sheet.rows:
if row_num > 1:
attributes = dict()
company_id = row[self.time_register_mappings['company_id']].value
try:
attributes['employee'] = Employee.objects.get(company_id=company_id)
attributes['date'] = row[self.time_register_mappings['date']].value
regular_hours = parse_datetime_hours(row[self.time_register_mappings['regular_hours']].value)
if regular_hours > 8.0:
raise ValueError(
                            'Regular hours cannot exceed 8 hours. You are trying to add %.1f hours' % regular_hours)
register_summary = MaximoTimeRegister.objects.get_employee_total_regular_hours(**attributes)
total_regular_hours = 0
if register_summary['total_regular_hours'] is not None:
total_regular_hours = register_summary['total_regular_hours']
if total_regular_hours + regular_hours <= 8.0:
attributes['pay_rate'] = Decimal(row[self.time_register_mappings['pay_rate']].value)
ticket_type, number = self._get_maximo_ticket_info(row)
attributes['ticket'] = MaximoTicket.objects.get(ticket_type=ticket_type, number=number)
attributes['regular_hours'] = regular_hours
attributes['defaults'] = {'description': row[self.time_register_mappings['description']].value}
register, created = MaximoTimeRegister.objects.get_or_create(**attributes)
if created:
created_count += 1
else:
msg = 'Data on row %d for employee %s ' \
'seems to be duplicated for record %d' % (row_num, attributes['employee'],
register.pk)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Possible duplicate',
'message': msg}
errors.append(error)
duplicate_count += 1
else:
msg = 'Data on row %d for employee %s exceeds ' \
                              'the maximum regular hours. It would end up having %.1f hours' % (row_num,
attributes['employee'],
total_regular_hours + regular_hours)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Exceed maximum 8 regular hours',
'message': msg}
errors.append(error)
duplicate_count += 1
except Employee.DoesNotExist:
username = row[self.time_register_mappings['username']].value
msg = 'Employee with id %s and username %s ' \
                          'on row %d does not exist; time register was not loaded' % (company_id, username, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Employee does not exist',
'message': msg}
errors.append(error)
except MaximoTicket.DoesNotExist:
msg = '%s with number %s on line %d does not exist' % (ticket_type, number, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Ticket does not exist',
'message': msg}
errors.append(error)
except TypeError as te:
msg = 'Unexpected error %s on row %d' % (te, row_num)
logger.error(msg)
error = {'row_num': row_num,
                             'type': 'Unexpected Type Error',
'message': msg}
errors.append(error)
except ValueError as ve:
msg = '%s on row %d' % (ve, row_num)
logger.error(msg)
error = {'row_num': row_num,
'type': 'Value Error',
'message': msg}
errors.append(error)
row_num += 1
time_results['rows_parsed'] = row_num - 2
time_results['created'] = created_count
time_results['duplicates'] = duplicate_count
time_results['sheet'] = sheet_name
time_results['errors'] = errors
return time_results
def load_tickets(self, wb, allow_update=False, **kwargs):
sheet_name = kwargs.get('ticket_sheet', self.ticket_sheet)
ticket_sheet = wb[sheet_name]
results = {'rows_parsed': 0,
'created': 0,
'updated': 0,
'sheet': sheet_name}
row_num = 1
created_count = 0
updated = 0
for row in ticket_sheet.rows:
if row_num > 1:
data_dictionary = row_to_dictionary(row, self.ticket_mappings)
obj, created = MaximoTicket.objects.get_or_create(ticket_type=data_dictionary['ticket_type'],
number=data_dictionary['number'],
defaults=data_dictionary)
if created:
self.write('%d Created Maximo ticket %s' % (row_num - 1, obj))
logger.debug('%d Created Maximo ticket %s' % (row_num - 1, obj))
created_count += 1
# logger.debug('--- %d tickets created' % created)
elif allow_update:
self.write('%d Update Maximo ticket %s' % (row_num - 1, obj))
logger.debug('%d Update Maximo ticket %s' % (row_num - 1, obj))
updated += 1
else:
logger.debug('%d Existed Maximo ticket %s' % (row_num - 1, obj))
row_num += 1
# logger.debug('%d tickets created' % created)
results = {'rows_parsed': row_num - 2,
'created': created_count,
'updated': updated,
'sheet': sheet_name}
return results
|
py | 1a387538e37cc66b9c243708b159f87a0bfdb12b | # Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import pickle
import typing as T
def rmtrees(build_dir: str, trees: T.List[str]) -> None:
for t in trees:
# Never delete trees outside of the builddir
if os.path.isabs(t):
print(f'Cannot delete dir with absolute path {t!r}')
continue
bt = os.path.join(build_dir, t)
# Skip if it doesn't exist, or if it is not a directory
if os.path.isdir(bt):
shutil.rmtree(bt, ignore_errors=True)
def run(args: T.List[str]) -> int:
if len(args) != 1:
print('Cleaner script for Meson. Do not run on your own please.')
print('cleantrees.py <data-file>')
return 1
with open(args[0], 'rb') as f:
data = pickle.load(f)
rmtrees(data.build_dir, data.trees)
# Never fail cleaning
return 0
if __name__ == '__main__':
run(sys.argv[1:])
|
py | 1a3876358b146fc98ec4c4f06fe3a335b537edf8 | #!/usr/bin/env python
from amuse.community import *
from omuse.community.oifs.interface import OpenIFS
import numpy
import matplotlib.pyplot as plot
import matplotlib.animation as movie
from mpl_toolkits.basemap import Basemap
import os
import sys
import shutil
oifs_procs = 1
oifs = None
def init():
global oifs
oifs = OpenIFS(number_of_workers=oifs_procs,redirection="none")
oifs.initialize_code()
def plotmovie(frames,steps):
size = oifs.itot
x = oifs.longitudes.value_in(units.deg)
y = oifs.latitudes.value_in(units.deg)
fig = plot.figure()
sp = fig.add_subplot(1,1,1)
sp.set_xlabel("T(K)")
sp.set_ylabel("z")
height = numpy.arange(0,oifs.ktot)
def update(i):
t = oifs.get_model_time()
oifs.evolve_model(t + (steps/frames)*oifs.get_timestep())
z = oifs.get_profile_field("T",oifs.itot/2).value_in(units.K)
        print(z)
sp.plot(z,height)
a = movie.FuncAnimation(fig,update,frames=frames,repeat=False)
plot.show()
def main(args):
init()
oifs.commit_parameters()
tim=oifs.get_model_time()
oifs.commit_grid()
plotmovie(frames=10,steps=10.)
oifs.cleanup_code()
if __name__=="__main__":
main(sys.argv[1:])
|
py | 1a3876473fdd87f258d5e2b5fcef193b290478cf | import copy
import sys
from rlpyt.utils.launching.affinity import encode_affinity, quick_affinity_code
from rlpyt.utils.launching.exp_launcher import run_experiments
from rlpyt.utils.launching.variant import VariantLevel, make_variants
args = sys.argv[1:]
assert len(args) == 2
my_computer = int(args[0])
num_computers = int(args[1])
print(f"MY_COMPUTER: {my_computer}, NUM_COMPUTERS: {num_computers}")
script = (
"rlpyt/ul/experiments/rl_from_ul/scripts/atari/train/atari_ppo_from_ul_serial.py"
)
affinity_code = quick_affinity_code(contexts_per_gpu=3)
runs_per_setting = 3
experiment_title = "ppo_from_atc_1"
variant_levels_1 = list()
variant_levels_2 = list()
# variant_levels_3 = list()
learning_rates = [1e-3]
values = list(zip(learning_rates))
dir_names = ["{}lr".format(*v) for v in values]
keys = [("pretrain", "learning_rate")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# games = ["pong", "qbert", "seaquest", "space_invaders",
# "alien", "breakout", "frostbite", "gravitar"]
games = ["alien", "frostbite", "pong", "seaquest"]
values = list(zip(games))
dir_names = games
keys = [("env", "game")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
# variant_levels_3.append(VariantLevel(keys, values, dir_names))
##################################################
# RL CONFIG (mostly)
n_steps = [25e6]
pretrain_algos = ["ATC"]
replays = ["20200608/15M_VecEps_B78"]
model_dirs = ["/data/adam/ul4rl/models/20200901/atari_atc_ul_single/"]
values = list(
zip(
n_steps,
pretrain_algos,
replays,
model_dirs,
)
)
dir_names = ["RlFromUl"] # TRAIN SCRIPT SPLITS OFF THIS
keys = [
("runner", "n_steps"),
("pretrain", "algo"),
("pretrain", "replay"),
("pretrain", "model_dir"),
]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
stop_conv_grads = [True]
hidden_sizes = [512]
values = list(zip(stop_conv_grads, hidden_sizes))
dir_names = ["{}_stpcnvgrd_{}hs".format(*v) for v in values]
keys = [("model", "stop_conv_grad"), ("model", "hidden_sizes")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
variants_1, log_dirs_1 = make_variants(*variant_levels_1)
# variants_2, log_dirs_2 = make_variants(*variant_levels_2)
variants = variants_1 # + variants_2
log_dirs = log_dirs_1 # + log_dirs_2
num_variants = len(variants)
variants_per = num_variants // num_computers
my_start = my_computer * variants_per
if my_computer == num_computers - 1:
my_end = num_variants
else:
my_end = (my_computer + 1) * variants_per
my_variants = variants[my_start:my_end]
my_log_dirs = log_dirs[my_start:my_end]
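# Worked example of the split above (illustrative numbers): with 12 variants and 3
# computers, variants_per = 4, so computer 0 runs variants [0:4], computer 1 runs [4:8],
# and the last computer picks up the remainder with [8:12].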
default_config_key = "ppo_16env"
run_experiments(
script=script,
affinity_code=affinity_code,
experiment_title=experiment_title,
runs_per_setting=runs_per_setting,
variants=my_variants,
log_dirs=my_log_dirs,
common_args=(default_config_key, experiment_title),
)
|
py | 1a3876becbab265fc3dec66d69cee14d374c1974 | import torch
import torch.nn as nn
import physics_aware_training.digital_twin_utils
class DNN(nn.Module):
def __init__(self, input_dim, nparams, output_dim, Nunits = None, batchnorm = False, nlaf = 'relu', **kwargs):
'''
Defines configurable deep neural network with fully connected layers and a choice of
nonlinear activation functions.
Args:
input_dim (int): dimension of input layer
output_dim (int): dimension of output layer
Nunits (list of int): dimensions of hidden layers
batchnorm (bool): determines whether to use batchnorm between each hidden layer.
The order in which batchnorm is applied is:
fully connected layer - batchnorm - nonlinear activation function
nlaf (string): determines the nonlinear activation function. Choices:
'relu', 'tanh', 'sigmoid'
'''
super(DNN, self).__init__()
if Nunits == None:
Nunits = [100, 100]
self.batchnorm = batchnorm
self.nlaf = nlaf
Nunits.insert(0, input_dim + nparams)
self.layers = nn.ModuleList([])
for i in range(len(Nunits) - 1):
self.layers.append(nn.Linear(Nunits[i], Nunits[i+1]))
self.outputlayer = nn.Linear(Nunits[-1], output_dim)
if batchnorm:
self.batchnorms = nn.ModuleList([])
for i in range(len(Nunits)-1):
self.batchnorms.append(nn.BatchNorm1d(Nunits[i+1]))
def forward(self, x):
'''
Performs the forward pass through the network.
Args:
x (float tensor): inputs of dimension [batch_size, input_dim + nparams]
'''
if self.nlaf == 'relu':
nlaf = torch.relu
elif self.nlaf == 'tanh':
nlaf = torch.tanh
elif self.nlaf == 'sigmoid':
nlaf = torch.sigmoid
for i, layer in enumerate(self.layers):
x = layer(x)
if self.batchnorm:
x = self.batchnorms[i](x)
x = nlaf(x)
return self.outputlayer(x)
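# Example construction (a sketch with made-up dimensions, not taken from the original
# code): a twin mapping 5 physical inputs plus 3 trainable parameters to 2 outputs,
# using two tanh hidden layers.
#   model = DNN(input_dim=5, nparams=3, output_dim=2, Nunits=[256, 256], nlaf='tanh')
#   y = model(torch.randn(32, 5 + 3))  # batch of 32 concatenated inputs and parameters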
class DNNObjective(object):
# define class to smuggle additional arguments into objective function
def __init__(self, train_loader, test_loader, dt_path,
input_dim, nparams, output_dim, **modelargs):
'''
Defines an optuna objective which optimizes hyperparameters drawn from the
distribution defined in __call__.
Args:
dt_path (string): Location at which best model will be saved
'''
self.modelargs = modelargs
self.dt_path = dt_path
self.train_loader = train_loader
self.test_loader = test_loader
self.input_dim = input_dim
self.nparams = nparams
self.output_dim = output_dim
def __call__(self, trial):
Nlayers = trial.suggest_categorical("Nlayers", [1, 2, 3, 4, 5])
lr = trial.suggest_loguniform("lr", 1e-4, 1e-1)
        Nunits = []
        for i in range(Nlayers):
            Nunits.append(int(trial.suggest_loguniform(f"Nunits{i + 1}", 50, 1000)))
name = f"{self.dt_path}_v{trial.number}" #create name with trial index
value, model_path = physics_aware_training.digital_twin_utils.train_loop_reg_model(
self.train_loader,
self.test_loader,
name,
self.input_dim,
self.nparams,
self.output_dim,
Model = DNN,
Nunits = Nunits,
Nlayers = Nlayers,
lr = lr,
trial = trial,
**self.modelargs)
trial.set_user_attr('model_path', model_path) #save the model path string in NAS study
return value |
py | 1a387740d274ad592f5924fd070a24243e888caa | # Copyright (c) 2012-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Divi base58 encoding and decoding.
Based on https://divitalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
if isinstance(c, str):
c = ord(c)
long_value += (256**i) * c
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Divi does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0:
nPad += 1
else:
break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for i, c in enumerate(v[::-1]):
pos = __b58chars.find(c)
assert pos != -1
long_value += pos * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
continue
break
result = bytes(nPad) + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
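# Round-trip property (illustrative): for any byte string v,
# b58decode_chk(b58encode_chk(v)) == v, because the appended 4-byte double-SHA256
# checksum is verified and stripped on decode.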
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21:
return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/divi/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
py | 1a38793b0a7e66e930ee003c453304356da04fd1 | """Python wrapper for Breeze ChMS API: http://www.breezechms.com/api
This API wrapper allows churches to build custom functionality integrated with
Breeze Church Management System.
Usage:
from breeze import breeze
breeze_api = breeze.BreezeApi(
breeze_url='https://demo.breezechms.com',
api_key='5c2d2cbacg3...')
people = breeze_api.get_people();
for person in people:
print '%s %s' % (person['first_name'], person['last_name'])
"""
__author__ = '[email protected] (Alex Ortiz-Rosado)'
import logging
import requests
from .utils import make_enum
ENDPOINTS = make_enum(
'BreezeApiURL',
PEOPLE='/api/people',
EVENTS='/api/events',
PROFILE_FIELDS='/api/profile',
CONTRIBUTIONS='/api/giving',
FUNDS='/api/funds',
PLEDGES='/api/pledges',
TAGS='/api/tags',
ACCOUNT_SUMMARY='/api/account/summary')
class BreezeError(Exception):
"""Exception for BreezeApi."""
pass
class BreezeApi(object):
"""A wrapper for the Breeze REST API."""
def __init__(self, breeze_url, api_key,
dry_run=False,
connection=requests.Session()):
"""Instantiates the BreezeApi with your Breeze account information.
Args:
breeze_url: Fully qualified domain for your organizations Breeze
service.
api_key: Unique Breeze API key. For instructions on finding your
organizations API key, see:
http://breezechms.com/docs#extensions_api
dry_run: Enable no-op mode, which disables requests from being made.
When combined with debug, this allows debugging requests
without affecting data in your Breeze account."""
self.breeze_url = breeze_url
self.api_key = api_key
self.dry_run = dry_run
self.connection = connection
# TODO(alex): use urlparse to check url format.
if not (self.breeze_url and self.breeze_url.startswith('https://') and
                '.breezechms.' in self.breeze_url):
raise BreezeError('You must provide your breeze_url as ',
'subdomain.breezechms.com')
if not self.api_key:
raise BreezeError('You must provide an API key.')
def _request(self, endpoint, params=None, headers=None, timeout=60):
"""Makes an HTTP request to a given url.
Args:
endpoint: URL where the service can be accessed.
params: Query parameters to append to endpoint url.
headers: HTTP headers; used for authenication parameters.
timeout: Timeout in seconds for HTTP request.
Returns:
HTTP response
Throws:
BreezeError if connection or request fails."""
if headers is None:
headers = {}
headers.update({
'Content-Type': 'application/json',
'Api-Key': self.api_key}
)
if params is None:
params = {}
keywords = dict(params=params, headers=headers, timeout=timeout)
url = '%s%s' % (self.breeze_url, endpoint)
logging.debug('Making request to %s', url)
if self.dry_run:
return
response = self.connection.get(url, verify=True, **keywords)
try:
response = response.json()
except requests.ConnectionError as error:
raise BreezeError(error)
else:
if not self._request_succeeded(response):
raise BreezeError(response)
logging.debug('JSON Response: %s', response)
return response
def _request_succeeded(self, response):
"""Predicate to ensure that the HTTP request succeeded."""
if isinstance(response, bool):
return response
else:
return not (('errors' in response) or ('errorCode' in response))
def get_account_summary(self):
"""Retrieve the details for a specific account using the API key
and URL. It can also work to see if the key and URL are valid.
Returns:
JSON response. For example:
{
"id":"1234",
"name":"Grace Church",
"subdomain":"gracechurchdemo",
"status":"1",
"created_on":"2018-09-10 09:19:35",
"details":{
"timezone":"America\/New_York",
"country":{
"id":"2",
"name":"United States of America",
"abbreviation":"USA",
"abbreviation_2":"US",
"currency":"USD",
"currency_symbol":"$",
"date_format":"MDY",
"sms_prefix":"1"
}
}
}
"""
return self._request(ENDPOINTS.ACCOUNT_SUMMARY)
def get_people(self, limit=None, offset=None, details=False):
"""List people from your database.
Args:
limit: Number of people to return. If None, will return all people.
offset: Number of people to skip before beginning to return results.
Can be used in conjunction with limit for pagination.
details: Option to return all information (slower) or just names.
returns:
JSON response. For example:
{
"id":"157857",
"first_name":"Thomas",
"last_name":"Anderson",
"path":"img\/profiles\/generic\/blue.jpg"
},
{
"id":"157859",
"first_name":"Kate",
"last_name":"Austen",
"path":"img\/profiles\/upload\/2498d7f78s.jpg"
},
{
...
}"""
params = []
if limit:
params.append('limit=%s' % limit)
if offset:
params.append('offset=%s' % offset)
if details:
params.append('details=1')
return self._request('%s/?%s' % (ENDPOINTS.PEOPLE, '&'.join(params)))
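    # Example usage (illustrative values): page through profiles 100 at a time with full
    # details, e.g. breeze_api.get_people(limit=100, offset=200, details=True).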
def get_profile_fields(self):
"""List profile fields from your database.
Returns:
JSON response."""
return self._request(ENDPOINTS.PROFILE_FIELDS)
def get_person_details(self, person_id):
"""Retrieve the details for a specific person by their ID.
Args:
person_id: Unique id for a person in Breeze database.
Returns:
JSON response."""
return self._request('%s/%s' % (ENDPOINTS.PEOPLE, str(person_id)))
def add_person(self, first_name, last_name, fields_json=None):
"""Adds a new person into the database.
Args:
first_name: The first name of the person.
last_name: The first name of the person.
fields_json: JSON string representing an array of fields to update.
Each array element must contain field id, field type, response,
and in some cases, more information.
ie. [ {
"field_id":"929778337",
"field_type":"email",
"response":"true",
"details":{
"address": "[email protected]",
"is_private":1}
}
].
Obtain such field information from get_profile_fields() or
use get_person_details() to see fields that already exist for a specific person.
Returns:
JSON response equivalent to get_person_details()."""
params = []
params.append('first=%s' % first_name)
params.append('last=%s' % last_name)
if fields_json:
params.append('fields_json=%s' % fields_json)
return self._request('%s/add?%s' % (ENDPOINTS.PEOPLE, '&'.join(params)))
def update_person(self, person_id, fields_json):
"""Updates the details for a specific person in the database.
Args:
person_id: Unique id for a person in Breeze database.
fields_json: JSON string representing an array of fields to update.
Each array element must contain field id, field type, response,
and in some cases, more information.
ie. [ {
"field_id":"929778337",
"field_type":"email",
"response":"true",
"details":{
"address": "[email protected]",
"is_private":1}
}
].
Obtain such field information from get_profile_fields() or
use get_person_details() to see fields that already exist for a specific person.
Returns:
JSON response equivalent to get_person_details(person_id)."""
return self._request(
'%s/update?person_id=%s&fields_json=%s' % (
ENDPOINTS.PEOPLE, person_id, fields_json
))
def get_events(self, start_date=None, end_date=None):
"""Retrieve all events for a given date range.
Args:
start_date: Start date; defaults to first day of the current month.
end_date: End date; defaults to last day of the current month
Returns:
JSON response."""
params = []
if start_date:
params.append('start=%s' % start_date)
if end_date:
params.append('end=%s' % end_date)
return self._request('%s/?%s' % (ENDPOINTS.EVENTS, '&'.join(params)))
def event_check_in(self, person_id, event_instance_id):
"""Checks in a person into an event.
Args:
person_id: id for a person in Breeze database.
event_instance_id: id for event instance to check into.."""
return self._request(
'%s/attendance/add?person_id=%s&instance_id=%s' % (
ENDPOINTS.EVENTS, str(person_id), str(event_instance_id)
))
def event_check_out(self, person_id, event_instance_id):
"""Remove the attendance for a person checked into an event.
Args:
person_id: Breeze ID for a person in Breeze database.
event_instance_id: id for event instance to check out (delete).
Returns:
True if check-out succeeds; False if check-out fails."""
return self._request(
'%s/attendance/delete?person_id=%s&instance_id=%s' % (
ENDPOINTS.EVENTS, str(person_id), str(event_instance_id)
))
def add_contribution(self,
date=None,
name=None,
person_id=None,
uid=None,
processor=None,
method=None,
funds_json=None,
amount=None,
group=None,
batch_number=None,
batch_name=None):
"""Add a contribution to Breeze.
Args:
date: Date of transaction in DD-MM-YYYY format (ie. 24-5-2015)
name: Name of person that made the transaction. Used to help match up
contribution to correct profile within Breeze. (ie. John Doe)
person_id: The Breeze ID of the donor. If unknown, use UID instead of
person id (ie. 1234567)
uid: The unique id of the person sent from the giving platform. This
should be used when the Breeze ID is unknown. Within Breeze a
user will be able to associate this ID with a given Breeze ID.
(ie. 9876543)
email: Email address of donor. If no person_id is provided, used to
help automatically match the person to the correct profile.
(ie. [email protected])
street_address: Donor's street address. If person_id is not provided,
street_address will be used to help automatically
match the person to the correct profile.
(ie. 123 Sample St)
processor: The name of the processor used to send the payment. Used
in conjunction with uid. Not needed if using Breeze ID.
(ie. SimpleGive, BluePay, Stripe)
method: The payment method. (ie. Check, Cash, Credit/Debit Online,
Credit/Debit Offline, Donated Goods (FMV), Stocks (FMV),
Direct Deposit)
funds_json: JSON string containing fund names and amounts. This
allows splitting fund giving. The ID is optional. If
present, it must match an existing fund ID and it will
override the fund name.
ie. [ {
'id':'12345',
'name':'General Fund',
'amount':'100.00'
},
{
'name':'Missions Fund',
'amount':'150.00'
}
]
amount: Total amount given. Must match sum of amount in funds_json.
group: This will create a new batch and enter all contributions with
the same group into the new batch. Previous groups will be
remembered and so they should be unique for every new batch.
Use this if wanting to import into the next batch number in a
series.
batch_number: The batch number to import contributions into. Use
group instead if you want to import into the next batch
number.
batch_name: The name of the batch. Can be used with batch number or
group.
Returns:
Payment Id.
Throws:
BreezeError on failure to add contribution."""
params = []
if date:
params.append('date=%s' % date)
if name:
params.append('name=%s' % name)
if person_id:
params.append('person_id=%s' % person_id)
if uid:
params.append('uid=%s' % uid)
if processor:
params.append('processor=%s' % processor)
if method:
params.append('method=%s' % method)
if funds_json:
params.append('funds_json=%s' % funds_json)
if amount:
params.append('amount=%s' % amount)
if group:
params.append('group=%s' % group)
if batch_number:
params.append('batch_number=%s' % batch_number)
if batch_name:
params.append('batch_name=%s' % batch_name)
response = self._request('%s/add?%s' % (ENDPOINTS.CONTRIBUTIONS,
'&'.join(params)))
return response['payment_id']
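    # Example call (illustrative fund names and amounts): build funds_json with the
    # standard library and let Breeze match the donor by person_id.
    #   import json
    #   funds = json.dumps([{'name': 'General Fund', 'amount': '75.00'}])
    #   payment_id = breeze_api.add_contribution(date='24-5-2015', person_id='1234567',
    #                                            method='Check', funds_json=funds,
    #                                            amount='75.00')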
def edit_contribution(self,
payment_id=None,
date=None,
name=None,
person_id=None,
uid=None,
processor=None,
method=None,
funds_json=None,
amount=None,
group=None,
batch_number=None,
batch_name=None):
"""Edit an existing contribution.
Args:
payment_id: The ID of the payment that should be modified.
date: Date of transaction in DD-MM-YYYY format (ie. 24-5-2015)
name: Name of person that made the transaction. Used to help match up
contribution to correct profile within Breeze. (ie. John Doe)
person_id: The Breeze ID of the donor. If unknown, use UID instead of
person id (ie. 1234567)
uid: The unique id of the person sent from the giving platform. This
should be used when the Breeze ID is unknown. Within Breeze a
user will be able to associate this ID with a given Breeze ID.
(ie. 9876543)
email: Email address of donor. If no person_id is provided, used to
help automatically match the person to the correct profile.
(ie. [email protected])
street_address: Donor's street address. If person_id is not provided,
street_address will be used to help automatically
match the person to the correct profile.
(ie. 123 Sample St)
processor: The name of the processor used to send the payment. Used
in conjunction with uid. Not needed if using Breeze ID.
(ie. SimpleGive, BluePay, Stripe)
method: The payment method. (ie. Check, Cash, Credit/Debit Online,
Credit/Debit Offline, Donated Goods (FMV), Stocks (FMV),
Direct Deposit)
funds_json: JSON string containing fund names and amounts. This
allows splitting fund giving. The ID is optional. If
present, it must match an existing fund ID and it will
override the fund name.
ie. [ {
'id':'12345',
'name':'General Fund',
'amount':'100.00'
},
{
'name':'Missions Fund',
'amount':'150.00'
}
]
amount: Total amount given. Must match sum of amount in funds_json.
group: This will create a new batch and enter all contributions with
the same group into the new batch. Previous groups will be
remembered and so they should be unique for every new batch.
Use this if wanting to import into the next batch number in a
series.
batch_number: The batch number to import contributions into. Use
group instead if you want to import into the next batch
number.
batch_name: The name of the batch. Can be used with batch number or
group.
Returns:
Payment id.
Throws:
BreezeError on failure to edit contribution."""
params = []
if payment_id:
params.append('payment_id=%s' % payment_id)
if date:
params.append('date=%s' % date)
if name:
params.append('name=%s' % name)
if person_id:
params.append('person_id=%s' % person_id)
if uid:
params.append('uid=%s' % uid)
if processor:
params.append('processor=%s' % processor)
if method:
params.append('method=%s' % method)
if funds_json:
params.append('funds_json=%s' % funds_json)
if amount:
params.append('amount=%s' % amount)
if group:
params.append('group=%s' % group)
if batch_number:
params.append('batch_number=%s' % batch_number)
if batch_name:
params.append('batch_name=%s' % batch_name)
response = self._request('%s/edit?%s' % (ENDPOINTS.CONTRIBUTIONS,
'&'.join(params)))
return response['payment_id']
def delete_contribution(self, payment_id):
"""Delete an existing contribution.
Args:
payment_id: The ID of the payment that should be deleted.
Returns:
Payment id.
Throws:
BreezeError on failure to delete contribution."""
response = self._request('%s/delete?payment_id=%s' % (
ENDPOINTS.CONTRIBUTIONS, payment_id
))
return response['payment_id']
def list_contributions(self,
start_date=None,
end_date=None,
person_id=None,
include_family=False,
amount_min=None,
amount_max=None,
method_ids=None,
fund_ids=None,
envelope_number=None,
batches=None,
forms=None):
"""Retrieve a list of contributions.
Args:
start_date: Find contributions given on or after a specific date
(ie. 2015-1-1); required.
end_date: Find contributions given on or before a specific date
(ie. 2018-1-31); required.
person_id: ID of person's contributions to fetch. (ie. 9023482)
include_family: Include family members of person_id (must provide
person_id); default: False.
amount_min: Contribution amounts equal or greater than.
amount_max: Contribution amounts equal or less than.
method_ids: List of method IDs.
fund_ids: List of fund IDs.
envelope_number: Envelope number.
batches: List of Batch numbers.
forms: List of form IDs.
Returns:
List of matching contributions.
Throws:
BreezeError on malformed request."""
params = []
if start_date:
params.append('start=%s' % start_date)
if end_date:
params.append('end=%s' % end_date)
if person_id:
params.append('person_id=%s' % person_id)
if include_family:
if not person_id:
raise BreezeError('include_family requires a person_id.')
params.append('include_family=1')
if amount_min:
params.append('amount_min=%s' % amount_min)
if amount_max:
params.append('amount_max=%s' % amount_max)
if method_ids:
params.append('method_ids=%s' % '-'.join(method_ids))
if fund_ids:
params.append('fund_ids=%s' % '-'.join(fund_ids))
if envelope_number:
params.append('envelope_number=%s' % envelope_number)
if batches:
params.append('batches=%s' % '-'.join(batches))
if forms:
params.append('forms=%s' % '-'.join(forms))
return self._request('%s/list?%s' % (ENDPOINTS.CONTRIBUTIONS,
'&'.join(params)))
def list_funds(self, include_totals=False):
"""List all funds.
Args:
include_totals: Amount given to the fund should be returned.
Returns:
JSON Reponse."""
params = []
if include_totals:
params.append('include_totals=1')
return self._request('%s/list?%s' %
(ENDPOINTS.FUNDS, '&'.join(params)))
def list_campaigns(self):
"""List of campaigns.
Returns:
JSON response."""
return self._request('%s/list_campaigns' % (ENDPOINTS.PLEDGES))
def list_pledges(self, campaign_id):
"""List of pledges within a campaign.
Args:
campaign_id: ID number of a campaign.
Returns:
JSON response."""
return self._request('%s/list_pledges?campaign_id=%s' % (
ENDPOINTS.PLEDGES, campaign_id
))
def get_tags(self, folder=None):
"""List of tags
Args:
folder: If set, only return tags in this folder id
Returns:
JSON response. For example:
[
{
"id":"523928",
"name":"4th & 5th",
"created_on":"2018-09-10 09:19:40",
"folder_id":"1539"
},
{
"id":"51994",
"name":"6th Grade",
"created_on":"2018-02-06 06:40:40",
"folder_id":"1539"
},
{ ... }
]"""
params = []
if folder:
params.append('folder_id=%s' % folder)
return self._request('%s/%s/?%s' % (ENDPOINTS.TAGS, 'list_tags', '&'.join(params)))
    def get_tag_folders(self):
"""List of tag folders
Args: (none)
Returns:
JSON response, for example:
[
{
"id":"1234567",
"parent_id":"0",
"name":"All Tags",
"created_on":"2018-06-05 18:12:34"
},
{
"id":"8234253",
"parent_id":"120425",
"name":"Kids",
"created_on":"2018-06-05 18:12:10"
},
{
"id":"1537253",
"parent_id":"5923042",
"name":"Small Groups",
"created_on":"2018-09-10 09:19:40"
},
{
"id":"20033",
"parent_id":"20031",
"name":"Student Ministries",
"created_on":"2018-12-15 18:11:31"
}
]"""
        return self._request("%s/%s" % (ENDPOINTS.TAGS, "list_folders"))
def assign_tag(self,
person_id,
tag_id):
"""
Update a person's tag/s.
params:
person_id: an existing person's user id
tag_id: the id number of the tag you want to assign to the user
output: true or false upon success or failure of tag update
"""
params = []
params.append('person_id=%s' % person_id)
params.append('tag_id=%s' % tag_id)
response = self._request('%s/assign?%s' %
(ENDPOINTS.TAGS, '&'.join(params)))
return response
def unassign_tag(self,
person_id,
tag_id):
"""
Delete a person's tag/s.
params:
person_id: an existing person's user id
tag_id: the id number of the tag you want to assign to the user
output: true or false upon success or failure of tag deletion
"""
params = []
params.append('person_id=%s' % person_id)
params.append('tag_id=%s' % tag_id)
response = self._request('%s/unassign?%s' %
(ENDPOINTS.TAGS, '&'.join(params)))
return response
|
py | 1a387975908c50c01a4b95948c2f3ee25d48a66e | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all disapproved ads for a given campaign with AWQL.
To add an ad, run add_ads.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201702')
# Construct query and get all ads for a given campaign.
query = ('SELECT Id, AdGroupAdDisapprovalReasons '
'WHERE CampaignId = %s AND '
'AdGroupCreativeApprovalStatus = DISAPPROVED '
'ORDER BY Id' % campaign_id)
ads = ad_group_ad_service.query(query)
# Display results.
if 'entries' in ads:
for ad in ads['entries']:
print ('Ad with id \'%s\' was disapproved for the following reasons: '
% (ad['ad']['id']))
if ad['ad'].get('disapprovalReasons'):
for reason in ad['ad']['disapprovalReasons']:
print('\t%s' % reason)
else:
print('\tReason not provided.')
else:
print('No disapproved ads were found.')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID)
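# --------------------------------------------------------------------------
# Hedged sketch of the "googleads.yaml" file referenced in the docstring above
# (illustrative only; the exact keys should be verified against the googleads
# library documentation, and every value below is a placeholder):
#
# adwords:
#   developer_token: INSERT_DEVELOPER_TOKEN_HERE
#   client_customer_id: INSERT_CLIENT_CUSTOMER_ID_HERE
#   client_id: INSERT_OAUTH2_CLIENT_ID_HERE
#   client_secret: INSERT_OAUTH2_CLIENT_SECRET_HERE
#   refresh_token: INSERT_REFRESH_TOKEN_HERE
# --------------------------------------------------------------------------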
|
py | 1a3879bb247b6db70dbbec59360d3a270a95b883 | # Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from unittest import mock
except ImportError:
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.tests.unit import base
from a10_octavia.common import config_options
from a10_octavia.controller.worker.flows import a10_load_balancer_flows
from a10_octavia.tests.common import a10constants
RACK_DEVICE = {
"project_id": "project-rack-vthunder",
"ip_address": "10.0.0.1",
"device_name": "rack_vthunder",
"username": "abc",
"password": "abc",
"interface_vlan_map": {"1": {"11": {"use_dhcp": True}, "12": {"use_dhcp": True}}}
}
RACK_DEVICE_LIST = {
"project-rack-vthunder": {
"project_id": "project-rack-vthunder",
"ip_address": "10.0.0.1",
"device_name": "rack_vthunder",
"username": "abc",
"password": "abc",
"interface_vlan_map": {"1": {"11": {"use_dhcp": True}, "12": {"use_dhcp": True}}}
},
"[dev]rack_vthunder": {
"project_id": "project-rack-vthunder",
"ip_address": "10.0.0.1",
"device_name": "rack_vthunder",
"username": "abc",
"password": "abc",
"interface_vlan_map": {"1": {"11": {"use_dhcp": True}, "12": {"use_dhcp": True}}}
}
}
@mock.patch("octavia.controller.worker.v1.tasks.database_tasks.UpdateAmphoraVIPData")
class TestLoadBalancerFlows(base.TestCase):
def setUp(self):
super(TestLoadBalancerFlows, self).setUp()
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.config(
group="controller_worker")
# amphora_driver='a10')
self.conf.config(group="nova", enable_anti_affinity=False)
self.flows = a10_load_balancer_flows.LoadBalancerFlows()
def tearDown(self):
super(TestLoadBalancerFlows, self).tearDown()
self.conf.reset()
def test_create_lb_flows(self, mock_net_driver):
lb = o_data_models.LoadBalancer(id=a10constants.MOCK_LOAD_BALANCER_ID,
project_id='project-vthunder')
(create_flow, store) = self.flows.get_delete_load_balancer_flow(lb, False, False)
self.assertIsInstance(create_flow, flow.Flow)
def test_create_lb_rack_vthunder_vlan_flow(self, mock_net_driver):
self.conf.register_opts(config_options.A10_GLOBAL_OPTS,
group=a10constants.A10_GLOBAL_CONF_SECTION)
self.conf.config(group=a10constants.A10_GLOBAL_CONF_SECTION, network_type='vlan')
target = self.flows.get_create_rack_vthunder_load_balancer_flow(
RACK_DEVICE, RACK_DEVICE_LIST, constants.TOPOLOGY_SINGLE)
self.assertIsInstance(target, flow.Flow)
def test_delete_lb_rack_vthunder_vlan_flow(self, mock_net_driver):
self.conf.register_opts(config_options.A10_GLOBAL_OPTS,
group=a10constants.A10_GLOBAL_CONF_SECTION)
self.conf.register_opts(config_options.A10_HARDWARE_THUNDER_OPTS,
group=a10constants.A10_HARDWARE_THUNDER_CONF_SECTION)
self.conf.config(group=a10constants.A10_GLOBAL_CONF_SECTION, network_type='vlan')
self.conf.config(group=a10constants.A10_HARDWARE_THUNDER_CONF_SECTION,
devices=[RACK_DEVICE])
lb = o_data_models.LoadBalancer(id=a10constants.MOCK_LOAD_BALANCER_ID,
project_id='project-rack-vthunder')
(del_flow, store) = self.flows.get_delete_load_balancer_flow(lb, False, False)
self.assertIsInstance(del_flow, flow.Flow)
|
py | 1a387b276c5bf33139737015ef35a7cb140556d4 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the neural network.
"""
from __future__ import print_function
import os
import inspect
import warnings
import numpy as np
import six
import paddle
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
from . import utils
from .. import unique_name
from functools import reduce
from .. import core
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
__all__ = [
'fc',
'embedding',
'linear_chain_crf',
'crf_decoding',
'cos_sim',
'chunk_eval',
'conv2d',
'conv3d',
'softmax',
'pool2d',
'pool3d',
'adaptive_pool2d',
'adaptive_pool3d',
'batch_norm',
'inplace_abn',
'instance_norm',
'data_norm',
'conv2d_transpose',
'conv3d_transpose',
'reduce_sum',
'reduce_mean',
'reduce_max',
'reduce_min',
'reduce_prod',
'reduce_all',
'reduce_any',
'dropout',
'split',
'ctc_greedy_decoder',
'l2_normalize',
'matmul',
'topk',
'transpose',
'im2sequence',
'row_conv',
'multiplex',
'layer_norm',
'group_norm',
'spectral_norm',
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'reshape',
'squeeze',
'unsqueeze',
'lod_reset',
'lod_append',
'lrn',
'pad',
'pad_constant_like',
'label_smooth',
'roi_pool',
'roi_align',
'dice_loss',
'image_resize',
'image_resize_short',
'resize_bilinear',
'resize_trilinear',
'resize_nearest',
'gather',
'gather_nd',
'scatter',
'scatter_nd_add',
'scatter_nd',
'random_crop',
'mean_iou',
'relu',
'selu',
'log',
'crop',
'crop_tensor',
'elu',
'relu6',
'pow',
'stanh',
'hard_sigmoid',
'swish',
'prelu',
'brelu',
'leaky_relu',
'soft_relu',
'flatten',
'stack',
'pad2d',
'unstack',
'unique',
'unique_with_counts',
'expand',
'expand_as',
'scale',
'elementwise_add',
'elementwise_div',
'elementwise_sub',
'elementwise_mul',
'elementwise_max',
'elementwise_min',
'elementwise_pow',
'elementwise_mod',
'elementwise_floordiv',
'uniform_random_batch_size_like',
'gaussian_random',
'sampling_id',
'gaussian_random_batch_size_like',
'sum',
'slice',
'strided_slice',
'shape',
'rank',
'size',
'logical_and',
'logical_or',
'logical_xor',
'logical_not',
'clip',
'clip_by_norm',
'mean',
'mul',
'maxout',
'space_to_depth',
'affine_grid',
'affine_channel',
'similarity_focus',
'hash',
'grid_sampler',
'log_loss',
'add_position_encoding',
'bilinear_tensor_product',
'merge_selected_rows',
'get_tensor_from_selected_rows',
'shuffle_channel',
'temporal_shift',
'py_func',
'psroi_pool',
'prroi_pool',
'pixel_shuffle',
'fsp_matrix',
'continuous_value_model',
'where',
'sign',
'deformable_conv',
'unfold',
'deformable_roi_pooling',
'filter_by_instag',
'shard_index',
'hard_swish',
'gather_tree',
'uniform_random',
]
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
op = getattr(core.ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def fc(input,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
name=None):
"""
**Fully Connected Layer**
This operator creates a fully connected layer in the network. It can take
a Tensor(or LoDTensor) or a list of Tensor(or LoDTensor) as its inputs(see
Args in detail). It creates a variable called weight for each input Tensor,
which represents a fully connected weight matrix from each input unit to
each output unit. The fully connected layer multiplies each input Tensor
with its corresponding weight to produce an output Tensor with shape :math:`[M, size]` ,
where M is batch size. If a list of Tensor is given, the results of
multiple output Tensors with shape :math:`[M, size]` will be summed up. If :attr:`bias_attr`
is not None, a bias variable will be created and added to the output.
Finally, if :attr:`act` is not None, it will be applied to the output as well.
When the input is a single Tensor(or LoDTensor):
.. math::
Out = Act({XW + b})
When the input is a list of Tensor(or LoDTensor):
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
In the above equation:
* :math:`N`: Number of the input. N equals to len(input) if input is list of Variable.
* :math:`X_i`: The i-th input tensor.
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output Tensor.
.. code-block:: text
Case 1:
Given a single Tensor data_1, and num_flatten_dims = 2:
data_1.data = [[[0.1, 0.2],
[0.3, 0.4]]]
data_1.shape = (1, 2, 2) # 1 is batch_size
out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2)
Then output is:
out.data = [[0.83234344], [0.34936576]]
out.shape = (1, 2, 1)
Case 2:
Given a list of Tensor:
data_1.data = [[[0.1, 0.2],
[0.3, 0.4]]]
data_1.shape = (1, 2, 2) # 1 is batch_size
data_2 = [[[0.1, 0.2, 0.3]]]
data_2.shape = (1, 1, 3)
out = fluid.layers.fc(input=[data_1, data_2], size=2)
Then:
out.data = [[0.18669507, 0.1893476]]
out.shape = (1, 2)
Args:
input (Variable|list of Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or
a list of Tensor(or LoDTensor). The dimensions of the input Tensor is at least 2 and the data
type should be float32 or float64.
size(int): The number of output units in this layer, which also means the feature size of output
Tensor(or LoDTensor).
num_flatten_dims (int): The fc layer can accept an input Tensor with more than
two dimensions. If this happens, the multidimensional tensor will first be flattened
into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to
form the second dimension of the final matrix (width of the matrix). For example, assuming that
X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1.
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Tensor or LoDTensor calculated by fc layer. The data type is same with input.
Raises:
ValueError: If dimensions of the input Tensor is less than 2.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# when input is single tensor
data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
# when input are multiple tensors
data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32")
data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32")
fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
"""
helper = LayerHelper("fc", **locals())
check_type(input, 'input', (list, tuple, Variable), 'fc')
if isinstance(input, (list, tuple)):
for i, input_x in enumerate(input):
check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc')
dtype = helper.input_dtype()
check_dtype(dtype, 'input', ['float16', 'float32', 'float64'], 'fc')
mul_results = []
for input_var, param_attr in helper.iter_inputs_and_params():
input_shape = input_var.shape
if num_flatten_dims == -1:
num_flatten_dims = len(input_shape) - 1
param_shape = [
reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
] + [size]
w = helper.create_parameter(
attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="mul",
inputs={"X": input_var,
"Y": w},
outputs={"Out": tmp},
attrs={"x_num_col_dims": num_flatten_dims,
"y_num_col_dims": 1})
mul_results.append(tmp)
if len(mul_results) == 1:
pre_bias = mul_results[0]
else:
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="sum",
inputs={"X": mul_results},
outputs={"Out": pre_bias},
attrs={"use_mkldnn": False})
# add bias
pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims)
# add activation
return helper.append_activation(pre_activation)
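# The helper below is a hedged, illustrative addition (not part of fluid): it
# only evaluates the num_flatten_dims weight-shape rule described in the fc()
# docstring above, for the documented [2, 3, 4, 5, 6] case.
def _example_fc_weight_shape():
    input_shape, num_flatten_dims, size = (2, 3, 4, 5, 6), 3, 10
    # Weight shape is [prod(input_shape[num_flatten_dims:]), size].
    param_shape = [reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)] + [size]
    return param_shape  # [30, 10]; the flattened input matrix itself is [24, 30]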
def embedding(input,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
"""
**WARNING:** This OP will be deprecated in a future release. This OP requires the
last dimension of Tensor shape must be equal to 1. It is recommended to use
fluid. :ref:`api_fluid_embedding` .
The operator is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
This OP requires the last dimension of Tensor shape must be equal to 1. The shape
of output Tensor is generated by replacing the last dimension of the input Tensor shape
with emb_size.
**Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
input.shape = [3, 2, 1]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
It will pad all-zero data when ids is 127.
Case 2:
input is a LoDTensor with 1-level LoD. padding_idx = 0
input.lod = [[2, 3]]
input.data = [[1], [3], [2], [4], [0]]
input.shape = [5, 1]
Given size = [128, 16]
output is a LoDTensor:
out.lod = [[2, 3]]
out.shape = [5, 16]
out.data = [[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654],
[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]] # padding data
It will pad all-zero data when ids is 0.
Args:
input(Variable): A Tensor or LoDTensor with type int64, which contains the id information.
The last dimension of Tensor shape must be equal to 1. The value of the input id should
satisfy :math:`0<= id < size[0]` .
size(tuple|list): The shape of lookup table parameter. It should have two elements which
indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
True because sparse update is faster. But some optimizers do not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
In these cases, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(str|core.VarDesc.VarType): It refers to the data type of output Tensor.
It must be float32 or float64. Default: float32.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name='x', shape=[None, 1], dtype='int64')
# example 1
emb_1 = fluid.embedding(input=data, size=[128, 64])
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
"""
helper = LayerHelper('embedding', **locals())
check_variable_and_dtype(input, 'input', ['int64'],
'fluid.layers.embedding')
check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
'fluid.layers.embedding')
remote_prefetch = is_sparse and (not is_distributed)
if remote_prefetch:
assert is_sparse is True and is_distributed is False
w = helper.create_parameter(
attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
tmp = helper.create_variable_for_type_inference(dtype)
padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
helper.append_op(
type='lookup_table',
inputs={'Ids': input,
'W': w},
outputs={'Out': tmp},
attrs={
'is_sparse': is_sparse,
'is_distributed': is_distributed,
'remote_prefetch': remote_prefetch,
'padding_idx': padding_idx
})
return tmp
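# Hedged, illustrative addition (not part of fluid): the padding_idx
# normalization applied inside embedding() above, matching the docstring's
# Case 1 where padding_idx = -1 becomes 127 for a vocabulary of 128.
def _example_embedding_padding_idx(padding_idx, vocab_size):
    if padding_idx is None:
        return -1
    return padding_idx if padding_idx >= 0 else vocab_size + padding_idx
# _example_embedding_padding_idx(-1, 128) -> 127
# _example_embedding_padding_idx(None, 128) -> -1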
def _pull_sparse(input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True):
"""
**Pull Fleet Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
Fleet lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
table_id(int): the fleet table id of this embedding.
accessor_class(str): the pslib accessor of the table, default is DownpourCtrAccessor.
ctr_label_name(str): the layer name of click.
padding_id(int): the padding id during lookup, default is 0.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatible with embedding op
'is_distributed': True
}
# this is only for compatible with embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
helper.append_op(
type='pull_sparse',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs=attrs)
if len(outs) == 1:
return outs[0]
return outs
def _pull_sparse_v2(input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True):
"""
**Pull Fleet Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
Fleet lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
table_id(int): the pslib table id of this embedding.
accessor_class(str): the fleet accessor of the table, default is DownpourCtrAccessor.
ctr_label_name(str): the layer name of click.
padding_id(int): the padding id during lookup, default is 0.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse_v2(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatible with embedding op
'is_distributed': True
}
# this is only for compatible with embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
helper.append_op(
type='pull_sparse_v2',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs=attrs)
if len(outs) == 1:
return outs[0]
return outs
def _pull_box_sparse(input, size, dtype='float32'):
"""
**Pull Box Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
BoxPS lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.pull_box_sparse(input=data, size=[11])
"""
helper = LayerHelper('pull_box_sparse', **locals())
if dtype != 'float32':
raise ValueError(
"BoxPS only support float type embedding now, and your type is: " +
dtype)
helper.input_dtype()
inputs = helper.multiple_input()
outs = [
helper.create_variable_for_type_inference(dtype)
for i in range(len(inputs))
]
helper.append_op(
type='pull_box_sparse',
inputs={'Ids': inputs},
outputs={'Out': outs},
attrs={'size': size})
if len(outs) == 1:
return outs[0]
return outs
@templatedoc()
def linear_chain_crf(input, label, param_attr=None, length=None):
"""
Linear Chain CRF.
${comment}
Args:
input(${emission_type}): ${emission_comment}
label(${label_type}): ${label_comment}
length(${length_type}): ${length_comment}
param_attr(ParamAttr): The attribute of the learnable parameter for transition parameter.
Returns:
output(${emission_exps_type}): ${emission_exps_comment} \n
output(${transition_exps_type}): ${transition_exps_comment} \n
output(${log_likelihood_type}): ${log_likelihood_comment} \n
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
#define net structure, using LodTensor
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32')
label = fluid.data(name='label', shape=[-1,1], dtype='int')
emission= fluid.layers.fc(input=input_data, size=10, act="tanh")
crf_cost = fluid.layers.linear_chain_crf(
input=emission,
label=label,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.01))
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
#define data, using LoDTensor
a = fluid.create_lod_tensor(np.random.rand(12,10).astype('float32'), [[3,3,4,2]], place)
b = fluid.create_lod_tensor(np.array([[1],[1],[2],[3],[1],[1],[1],[3],[1],[1],[1],[1]]),[[3,3,4,2]] , place)
feed1 = {'input_data':a,'label':b}
loss= exe.run(train_program,feed=feed1, fetch_list=[crf_cost])
print(loss)
#define net structure, using padding
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input_data2 = fluid.data(name='input_data2', shape=[-1,10,10], dtype='float32')
label2 = fluid.data(name='label2', shape=[-1,10,1], dtype='int')
label_length = fluid.data(name='length', shape=[-1,1], dtype='int')
emission2= fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2)
crf_cost2 = fluid.layers.linear_chain_crf(
input=emission2,
label=label2,
length=label_length,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.01))
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
#define data, using padding
cc=np.random.rand(4,10,10).astype('float32')
dd=np.random.rand(4,10,1).astype('int64')
ll=np.array([[3],[3],[4],[2]])
feed2 = {'input_data2':cc,'label2':dd,'length':ll}
loss2= exe.run(train_program,feed=feed2, fetch_list=[crf_cost2])
print(loss2)
#[array([[ 7.8902354],
# [ 7.3602567],
# [ 10.004011],
# [ 5.86721 ]], dtype=float32)]
#you can use find_var to get transition parameter.
transition=np.array(fluid.global_scope().find_var('crfw').get_tensor())
print(transition)
"""
helper = LayerHelper('linear_chain_crf', **locals())
size = input.shape[2] if length else input.shape[1]
transition = helper.create_parameter(
attr=helper.param_attr,
shape=[size + 2, size],
dtype=helper.input_dtype())
alpha = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
emission_exps = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
transition_exps = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
log_likelihood = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
this_inputs = {
"Emission": [input],
"Transition": transition,
"Label": [label]
}
if length:
this_inputs['Length'] = [length]
helper.append_op(
type='linear_chain_crf',
inputs=this_inputs,
outputs={
"Alpha": [alpha],
"EmissionExps": [emission_exps],
"TransitionExps": transition_exps,
"LogLikelihood": log_likelihood
})
return log_likelihood
@templatedoc()
def crf_decoding(input, param_attr, label=None, length=None):
"""
${comment}
Args:
input(${emission_type}): ${emission_comment}
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
label(${label_type}, optional): ${label_comment}
length(${length_type}, optional): ${length_comment}
Returns:
Variable: ${viterbi_path_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
# LoDTensor-based example
num_labels = 10
feature = fluid.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
label = fluid.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
emission = fluid.layers.fc(input=feature, size=num_labels)
crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label,
param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(input=emission,
param_attr=fluid.ParamAttr(name="crfw"))
# Common tensor example
num_labels, max_len = 10, 20
feature = fluid.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
label = fluid.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
length = fluid.data(name='length', shape=[-1, 1], dtype='int64')
emission = fluid.layers.fc(input=feature, size=num_labels,
num_flatten_dims=2)
crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label, length=length,
param_attr=fluid.ParamAttr(name="crfw_pad"))
crf_decode = fluid.layers.crf_decoding(input=emission, length=length,
param_attr=fluid.ParamAttr(name="crfw_pad"))
"""
helper = LayerHelper('crf_decoding', **locals())
transition = helper.get_parameter(param_attr.name)
viterbi_path = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
inputs = {"Emission": [input], "Transition": transition, "Label": label}
if length:
inputs['Length'] = length
helper.append_op(
type='crf_decoding',
inputs=inputs,
outputs={"ViterbiPath": [viterbi_path]})
return viterbi_path
@templatedoc()
def cos_sim(X, Y):
"""
${comment}
Args:
X (Variable): ${x_comment}.
Y (Variable): ${y_comment}.
Returns:
A Variable holding LoDTensor representing the output of cosine(X, Y).
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7], dtype='float32')
y = fluid.data(name='y', shape=[1, 7], dtype='float32')
out = fluid.layers.cos_sim(x, y)
"""
check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
helper = LayerHelper('cos_sim', **locals())
out = helper.create_variable_for_type_inference(dtype=X.dtype)
xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
ynorm = helper.create_variable_for_type_inference(dtype=X.dtype)
helper.append_op(
type='cos_sim',
inputs={'X': [X],
'Y': [Y]},
outputs={'Out': [out],
'XNorm': [xnorm],
'YNorm': [ynorm]})
return out
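# Hedged numpy sketch (not part of fluid): the row-wise cosine similarity that
# the cos_sim op above computes; a single-row Y is broadcast against each row
# of X, as in the docstring example with shapes [3, 7] and [1, 7].
def _example_cos_sim(x, y):
    num = (x * y).sum(axis=1)
    denom = np.linalg.norm(x, axis=1) * np.linalg.norm(y, axis=1)
    return num / denom
# _example_cos_sim(np.array([[1.0, 0.0], [0.0, 1.0]]), np.array([[1.0, 1.0]]))
# -> array([0.7071..., 0.7071...])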
def dropout(x,
dropout_prob,
is_test=False,
seed=None,
name=None,
dropout_implementation="downgrade_in_infer"):
"""
Computes dropout.
Drop or keep each element of `x` independently. Dropout is a regularization
technique for reducing overfitting by preventing neuron co-adaption during
training. The dropout operator randomly sets (according to the given dropout
probability) the outputs of some units to zero, while others remain
unchanged.
At inference time with ``upscale_in_train``, dropout becomes an identity mapping, so the op can be removed from the program to make the program more efficient.
Args:
x (Variable): The input tensor variable. The data type is float16 or float32 or float64.
dropout_prob (float): Probability of setting units to zero.
is_test (bool): A flag indicating whether it is in the test phase or not.
seed (int): A Python integer used to create random seeds. If this
parameter is set to None, a random seed is used.
NOTE: If an integer seed is given, the same output
units will always be dropped. DO NOT use a fixed seed in training. Default: None.
name (str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
- train: out = input * mask
- inference: out = input * (1.0 - dropout_prob)
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
- train: out = input * mask / ( 1.0 - dropout_prob )
- inference: out = input
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
Returns:
A Variable holding Tensor representing the dropout, has same shape and data type with `x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
dropped = fluid.layers.dropout(x, dropout_prob=0.5)
"""
def get_attrs(prog, dropout_prob, is_test, seed):
if (seed is None or seed == 0) and prog.random_seed != 0:
seed = prog.random_seed
attrs = {
'dropout_prob': dropout_prob,
'is_test': is_test,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0,
'dropout_implementation': dropout_implementation,
}
return attrs
if in_dygraph_mode():
if (seed is None or
seed == 0) and default_main_program().random_seed != 0:
seed = default_main_program().random_seed
seed = seed if seed is not None else 0
_is_test = not _dygraph_tracer()._train_mode
out, mask = core.ops.dropout(x, 'dropout_prob', dropout_prob, 'is_test',
_is_test, 'fix_seed', seed is not None,
'seed', seed, 'dropout_implementation',
dropout_implementation)
return out
helper = LayerHelper('dropout', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'dropout')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
mask = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)
helper.append_op(
type='dropout',
inputs={'X': [x]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
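# Hedged numerical sketch (not part of fluid): the two dropout_implementation
# behaviours documented above, written out with numpy so the train/inference
# scaling is easy to compare.
def _example_dropout_scaling(x, mask, dropout_prob):
    downgrade_train = x * mask                           # downgrade_in_infer, train
    downgrade_infer = x * (1.0 - dropout_prob)           # downgrade_in_infer, inference
    upscale_train = x * mask / (1.0 - dropout_prob)      # upscale_in_train, train
    upscale_infer = x                                    # upscale_in_train, inference
    return downgrade_train, downgrade_infer, upscale_train, upscale_infer
# _example_dropout_scaling(np.ones(4), np.array([1., 0., 1., 1.]), 0.5)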
@templatedoc()
def chunk_eval(input,
label,
chunk_scheme,
num_chunk_types,
excluded_chunk_types=None,
seq_length=None):
"""
This operator computes the precision, recall and F1-score for chunk detection.
It is often used in sequence tagging tasks, such as Named Entity Recognition (NER).
For some basics of chunking, please refer to
`Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .
This operator supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
Here is a NER example for the usage of these tagging schemes:
.. code-block:: python
====== ====== ====== ===== == ============ ===== ===== ===== == =========
Li Ming works at Agricultural Bank of China in Beijing.
====== ====== ====== ===== == ============ ===== ===== ===== == =========
IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC
IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC
IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC
IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC
====== ====== ====== ===== == ============ ===== ===== ===== == =========
There are three chunk types(named entity types) including PER(person), ORG(organization)
and LOC(location), and we can see that the labels have the form `<tag type>-<chunk type>` .
Since the implementation of this operator actually uses label ids rather than
label strings, to make it work, there should be a way to map label ids to
tag types and chunk types. This operator uses the following way to do mapping:
.. code-block:: python
tag_type = label % num_tag_type
chunk_type = label // num_tag_type
where `num_tag_type` is the num of tag types in the tagging scheme, `num_chunk_type`
is the num of chunk types, and `tag_type` get its value from the following table.
.. code-block:: python
Scheme Begin Inside End Single
plain 0 - - -
IOB 0 1 - -
IOE - 0 1 -
IOBES 0 1 2 3
Accordingly, in the above NER example, if the tagging scheme is IOB and chunk
types are ORG, PER and LOC, then the label ids would be as follows:
.. code-block:: python
B-ORG 0
I-ORG 1
B-PER 2
I-PER 3
B-LOC 4
I-LOC 5
O 6
With which we can map each label id to the corresponding tag type and chunk
type correctly.
Args:
input (Variable): A Tensor or LoDTensor, representing the predicted labels
from the network. When it is a Tensor, its shape would be `[N, M, 1]`,
where `N` stands for batch size, `M` for sequence length; When it is
a LoDTensor, its shape would be `[N, 1]` where `N` stands for the total
sequence lengths in this mini-batch. The data type should be int64.
label (Variable): A Tensor or LoDTensor representing the ground-truth labels.
It should have the same shape, lod and data type as ``input`` .
chunk_scheme (str): Indicate the tagging schemes used here. The value must
be IOB, IOE, IOBES or plain.
num_chunk_types (int): The number of chunk types.
excluded_chunk_types (list, optional): Indicate the chunk types shouldn't
be taken into account. It should be a list of chunk type ids(integer).
Default None.
seq_length(Variable, optional): A 1D Tensor containing the length of each
sequence when ``input`` and ``label`` are Tensor. It needn't be
provided if ``input`` and ``label`` are LoDTensor. Default None.
Returns:
tuple: A tuple including precision, recall, F1-score, chunk number detected, \
chunk number in ground-truth, chunk number correctly detected. Each \
is a Tensor with shape `[1]`. The data type of precision, recall and \
F1-score all is float32, and the others' data type all is int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
dict_size = 10000
label_dict_len = 7
sequence = fluid.data(
name='id', shape=[-1, 1], lod_level=1, dtype='int64')
embedding = fluid.embedding(
input=sequence, size=[dict_size, 512])
hidden = fluid.layers.fc(input=embedding, size=512)
label = fluid.layers.data(
name='label', shape=[1], lod_level=1, dtype='int32')
crf = fluid.layers.linear_chain_crf(
input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(
input=hidden, param_attr=fluid.ParamAttr(name="crfw"))
fluid.layers.chunk_eval(
input=crf_decode,
label=label,
chunk_scheme="IOB",
num_chunk_types=(label_dict_len - 1) / 2)
"""
helper = LayerHelper("chunk_eval", **locals())
# prepare output
precision = helper.create_variable_for_type_inference(dtype="float32")
recall = helper.create_variable_for_type_inference(dtype="float32")
f1_score = helper.create_variable_for_type_inference(dtype="float32")
num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64")
num_label_chunks = helper.create_variable_for_type_inference(dtype="int64")
num_correct_chunks = helper.create_variable_for_type_inference(
dtype="int64")
this_input = {"Inference": [input], "Label": [label]}
if seq_length:
this_input["SeqLength"] = [seq_length]
helper.append_op(
type="chunk_eval",
inputs=this_input,
outputs={
"Precision": [precision],
"Recall": [recall],
"F1-Score": [f1_score],
"NumInferChunks": [num_infer_chunks],
"NumLabelChunks": [num_label_chunks],
"NumCorrectChunks": [num_correct_chunks]
},
attrs={
"num_chunk_types": num_chunk_types,
"chunk_scheme": chunk_scheme,
"excluded_chunk_types": excluded_chunk_types or []
})
return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks)
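# Hedged, illustrative addition (not part of fluid): applies the label-id
# mapping described in the chunk_eval docstring (tag_type = label % num_tag_type,
# chunk_type = label // num_tag_type) to the IOB scheme with ORG/PER/LOC.
def _example_chunk_label_mapping():
    num_tag_types = 2                      # IOB has two tag types: Begin and Inside
    tags = ['B', 'I']
    chunk_types = ['ORG', 'PER', 'LOC']
    mapping = {}
    for label in range(num_tag_types * len(chunk_types)):
        mapping[label] = '%s-%s' % (tags[label % num_tag_types],
                                    chunk_types[label // num_tag_types])
    mapping[6] = 'O'                       # the "outside" label
    return mapping  # {0: 'B-ORG', 1: 'I-ORG', 2: 'B-PER', 3: 'I-PER', 4: 'B-LOC', 5: 'I-LOC', 6: 'O'}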
def softmax(input, use_cudnn=False, name=None, axis=-1):
"""
This operator implements the softmax layer. The calculation process is as follows:
1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
second dimension(row length) is the same as the dimension :attr:`axis` of the input
tensor, and the first dimension(column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
squashes the K-dimensional(K is the width of the matrix, which is also the size
of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
3. After the softmax operation is completed, the inverse operations of steps 1 and 2
are performed to restore the two-dimensional matrix to the same dimension as the ``input``.
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
Then the ratio of the exponential of the given dimension and the sum of
exponential values of all the other dimensions is the output of the softmax
operator.
For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}
Example:
.. code-block:: text
Case 1:
Input:
X.shape = [2, 3, 4]
X.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = -1
Output:
Out.shape = [2, 3, 4]
Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.07232949, 0.19661193, 0.19661193, 0.53444665]],
[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
Case 2:
Input:
X.shape = [2, 3, 4]
X.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = 1
Output:
Out.shape = [2, 3, 4]
Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
[0.01786798, 0.01786798, 0.04661262, 0.04661262],
[0.97555875, 0.97555875, 0.93623955, 0.93623955]],
[[0.00490169, 0.00490169, 0.00490169, 0.00490169],
[0.26762315, 0.26762315, 0.26762315, 0.26762315],
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Args:
input (Variable): The input variable. A multi-dimensional ``Tensor`` with type float32 or float64.
use_cudnn (bool, optional): Whether to use the cudnn kernel; it is valid only when the cudnn \
library is installed. It is set to False by default to improve numerical stability.
name (str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
axis (int, optional): The index of dimension to perform softmax calculations, it should
be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
input variable. Default: -1. -1 means the last dimension.
Returns:
Variable: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="input", shape=[-1, 3],dtype="float32")
result = fluid.layers.softmax(data,axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3, 3).astype("float32")
output= exe.run(feed={"input": x},
fetch_list=[result[0]])
print(output)
"""
if in_dygraph_mode():
return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)
inputs = {"X": [input]}
attrs = {"axis": axis, "use_cudnn": use_cudnn}
helper = LayerHelper('softmax', **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'softmax')
dtype = helper.input_dtype()
softmax_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="softmax",
inputs={"X": input},
outputs={"Out": softmax_out},
attrs=attrs)
return softmax_out
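# Hedged numpy sketch (not part of fluid): the per-axis softmax formula from
# the docstring above, with the usual max-shift for numerical stability.
def _example_softmax(x, axis=-1):
    shifted = x - x.max(axis=axis, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)
# _example_softmax(np.array([[2.0, 3.0, 4.0, 5.0]]))
# -> approximately [[0.0321, 0.0871, 0.2369, 0.6439]], matching Case 1 above.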
def conv2d(input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCHW"):
"""
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW or NHWC format, where N is batch size, C is the number of
channels, H is the height of the feature, and W is the width of the feature.
Filter is in MCHW format, where M is the number of output image channels,
C is the number of input image channels, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input image channels divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If a bias attribute and an activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type
of input is float16 or float32 or float64.
num_filters(int): The number of filter. It is as same as the output
image channel.
filter_size (int|tuple): The filter size. If filter_size
is a tuple, it must contain two integers, (filter_size_height,
filter_size_width). Otherwise, filter_size_height = filter_size_width =\
filter_size.
stride (int|tuple): The stride size. It means the stride in convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension.If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
`data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
[pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel
points. If dilation is a tuple, it must contain two integers, (dilation_height,
dilation_width). Otherwise, dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set name, and it is
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv2d, whose data type is the
same with input. If act is None, the tensor variable storing the convolution
result, and if act is not None, the tensor variable storing convolution
and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels * groups.
ShapeError: If the number of output channels is not divisible by groups.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
"""
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'conv2d')
num_channels = input.shape[1]
if not isinstance(use_cudnn, bool):
raise ValueError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NHWC")
num_channels = input.shape[3] if channel_last else input.shape[1]
if num_channels < 0:
raise ValueError(
"The channel dimmention of the input(%s) should be defined. "
"Received: %s." % (str(input.shape), str(num_channels)))
assert param_attr is not False, "param_attr should not be False here."
l_type = 'conv2d'
if (num_channels == groups and num_filters % num_channels == 0 and
not use_cudnn):
l_type = 'depthwise_conv2d'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
"received: the channel of input is {}, the shape of input is {}"
", the groups is {}".format(num_channels, input.shape, groups))
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
# padding
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
else:
padding = utils.convert_to_list(padding, 2, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0]
padding = _update_padding(padding, data_format)
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
'fuse_relu_before_depthwise_conv': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
return helper.append_activation(pre_act)
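# Hedged helper (not part of fluid): evaluates the H_out / W_out formula given
# in the conv2d docstring for explicit, symmetric padding.
def _example_conv2d_output_size(in_size, padding, dilation, filter_size, stride):
    return (in_size + 2 * padding - (dilation * (filter_size - 1) + 1)) // stride + 1
# A 32x32 input with a 3x3 filter, stride 1 and padding 0 gives
# _example_conv2d_output_size(32, 0, 1, 3, 1) == 30 in each spatial dimension.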
def conv3d(input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCDHW"):
"""
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are in NCDHW or NDHWC format, where N is batch size, C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convolution3D is similar to Convolution2D
but adds one dimension (depth). If a bias attribute and an activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
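For example (illustrative), with :math:`D_{in} = H_{in} = W_{in} = 32`, a 3 x 3 x 3 filter,
stride 1, padding 0 and dilation 1, each spatial output dimension is :math:`(32 + 0 - 3)/1 + 1 = 30`.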
Args:
input (Variable): The input is 5-D Tensor with shape [N, C, D, H, W], the data
type of input is float16 or float32 or float64.
num_filters(int): The number of filters. It is the same as the number of output
image channels.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size.
stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A Variable holding a Tensor representing the conv3d result, whose data type is
the same as the input. If act is None, the returned tensor stores the
convolution result; if act is not None, it stores the result of the
convolution followed by the activation.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels * groups.
ShapeError: If the number of output channels is not divisible by groups.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
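# The lines below are an illustrative sketch, not part of the original example:
# a channels-last (NDHWC) grouped convolution; the tensor name is hypothetical.
data_ndhwc = fluid.data(name='data_ndhwc', shape=[None, 12, 32, 32, 6], dtype='float32')
conv3d_group = fluid.layers.conv3d(input=data_ndhwc, num_filters=4, filter_size=3,
groups=2, data_format="NDHWC", act="relu")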
"""
l_type = 'conv3d'
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if not isinstance(use_cudnn, bool):
raise ValueError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NDHWC")
num_channels = input.shape[4] if channel_last else input.shape[1]
if num_channels < 0:
raise ValueError(
"The channel dimmention of the input(%s) should be defined. "
"Received: %s." % (str(input.shape), str(num_channels)))
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
"Received: number of channels(%s), groups(%s)." %
(str(num_channels), str(groups)))
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0]
padding = _update_padding(padding, data_format)
input_shape = input.shape
filter_shape = [num_filters, num_filter_channels] + filter_size
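# The default initializer below is a Kaiming/He-style scheme:
# std = sqrt(2 / fan_in), where fan_in = D_f * H_f * W_f * C_in.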
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
})
if data_format == 'NCDHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
return helper.append_activation(pre_act)
@templatedoc()
def pool2d(input,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
name=None,
exclusive=True,
data_format="NCHW"):
"""
${comment}
Args:
input (Variable): The input tensor of pooling operator which is a 4-D tensor with
shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
`"NHWC"`, where `N` is batch size, `C` is the number of channels,
`H` is the height of the feature, and `W` is the width of the
feature. The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be a square of an int.
pool_type: ${pooling_type_comment}
pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width).
Otherwise, the pool stride size will be a square of an int.
pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`,
`pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Otherwise, the pool padding size will be a square of an int.
global_pooling (bool): ${global_pooling_comment}
use_cudnn (bool): ${use_cudnn_comment}
ceil_mode (bool): ${ceil_mode_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is `true`.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `pool_type` is not "max" nor "avg".
ValueError: If `global_pooling` is False and `pool_size` is -1.
TypeError: If `use_cudnn` is not a bool value.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
ShapeError: If the input is not a 4-D or 5-D Tensor.
ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
# max pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "max",
pool_stride = 1,
global_pooling=False)
# average pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=False)
# global average pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=True)
# Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW".
out_1 = fluid.layers.pool2d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = [1, 2, 1, 0],
data_format = "NCHW")
# Attr(pool_padding) is a string, Attr(data_format) is "NCHW".
out_2 = fluid.layers.pool2d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = "VALID",
data_format = "NCHW")
"""
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.",
str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When Attr(global_pooling) is False, Attr(pool_size) must be passed "
"and be a valid value. Received pool_size: %s." % str(pool_size))
if not isinstance(use_cudnn, bool):
raise TypeError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s." % str(use_cudnn))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
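# update_padding below normalizes every accepted pool_padding form to a flat list.
# For example (illustrative): pool_padding=[[0, 0], [1, 1], [2, 2], [0, 0]] with "NHWC"
# becomes [1, 1, 2, 2], which is symmetric and therefore collapses to [1, 2].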
def update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
else:
padding = utils.convert_to_list(padding, 2, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(pool_padding, str):
pool_padding = pool_padding.upper()
if pool_padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
% str(pool_padding))
if pool_padding == "VALID":
padding_algorithm = "VALID"
pool_padding = [0, 0]
if ceil_mode != False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. "
"Received ceil_mode: True.")
elif pool_padding == "SAME":
padding_algorithm = "SAME"
pool_padding = [0, 0]
pool_padding = update_padding(pool_padding, data_format)
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=op_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": use_cudnn,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return pool_out
@templatedoc()
def pool3d(input,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
name=None,
exclusive=True,
data_format="NCDHW"):
"""
${comment}
Args:
input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of
input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is
the number of channels, `D` is the depth of the feature,
`H` is the height of the feature, and `W` is the width
of the feature.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size
is a tuple or list, it must contain three integers,
(pool_size_Depth, pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
pool_type (string): ${pooling_type_comment}
pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
it must contain three integers, `[stride_Depth, stride_Height, stride_Width]`.
Otherwise, the pool stride size will be a cube of an int.
pool_padding (string|int|list|tuple): The pool padding size. If `pool_padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
global_pooling (bool): ${global_pooling_comment}
use_cudnn (bool): ${use_cudnn_comment}
ceil_mode (bool): ${ceil_mode_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is true.
data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
Variable: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `pool_type` is not "max" nor "avg".
ValueError: If `global_pooling` is False and `pool_size` is -1.
TypeError: If `use_cudnn` is not a bool value.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
ShapeError: If the input is not a 5-D Tensor.
ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
# max pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "max",
pool_stride = 1,
global_pooling=False)
# average pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=False)
# global average pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=True)
# example 1:
# Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW".
out_1 = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
pool_padding = [1, 2, 1, 0, 1, 2],
global_pooling = False,
data_format = "NCDHW")
# example 2:
# Attr(pool_padding) is a string, Attr(data_format) is "NCDHW".
out_2 = fluid.layers.pool3d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = "VALID",
global_pooling = False,
data_format = "NCDHW")
"""
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.",
str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When Attr(global_pooling) is False, Attr(pool_size) must be passed "
"and be a valid value. Received Attr(pool_size): %s." %
str(pool_size))
if not isinstance(use_cudnn, bool):
raise TypeError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s" % str(data_format))
pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride')
def update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, (list, tuple)):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(pool_padding, str):
pool_padding = pool_padding.upper()
if pool_padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
% str(pool_padding))
if pool_padding == "VALID":
padding_algorithm = "VALID"
pool_padding = [0, 0, 0]
if ceil_mode != False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", ceil_mode must be False. "
"Received ceil_mode: True.")
elif pool_padding == "SAME":
padding_algorithm = "SAME"
pool_padding = [0, 0, 0]
pool_padding = update_padding(pool_padding, data_format)
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=op_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": use_cudnn,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return pool_out
@templatedoc(op_type="pool2d")
def adaptive_pool2d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
"""
This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature. Parameters(pool_size) should contain two elements which
represent height and width, respectively. Also the H and W dimensions of output(Out)
are the same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]].
For average adaptive pool2d:
.. math::
hstart &= floor(i * H_{in} / H_{out})
hend &= ceil((i + 1) * H_{in} / H_{out})
wstart &= floor(j * W_{in} / W_{out})
wend &= ceil((j + 1) * W_{in} / W_{out})
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
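For example (illustrative), with :math:`H_{in} = 7` and :math:`H_{out} = 3`, the rows are pooled over the
index ranges [0, 3), [2, 5) and [4, 7), so adjacent grids may overlap while always covering the input.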
Args:
input (Variable): The input tensor of pooling operator, which is a 4-D tensor
with shape [N, C, H, W]. The format of input tensor is NCHW,
where N is batch size, C is the number of channels, H is the
height of the feature, and W is the width of the feature.
The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
pool_type: ${pooling_type_comment}
require_index (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: The output tensor of adaptive pooling result. The data type is same
as input tensor.
Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
ValueError: 'pool_size' should be a list or tuple with length as 2.
Examples:
.. code-block:: python
# average adaptive pool2d
# suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
# output shape is [N, C, m, n], adaptive pool divide H and W dimensions
# of input data into m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive average pool performs calculations as follow:
#
# for i in range(m):
# for j in range(n):
# hstart = floor(i * H / m)
# hend = ceil((i + 1) * H / m)
# wstart = floor(j * W / n)
# wend = ceil((j + 1) * W / n)
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='avg')
# max adaptive pool2d
# suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
# output shape is [N, C, m, n], adaptive pool divide H and W dimensions
# of input data into m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive max pool performs calculations as follow:
#
# for i in range(m):
# for j in range(n):
# hstart = floor(i * H / m)
# hend = ceil((i + 1) * H / m)
# wstart = floor(j * W / n)
# wend = ceil((j + 1) * W / n)
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='max')
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_pool2d')
check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
str(pool_type))
if pool_type == "avg" and require_index:
raise ValueError(
"invalid setting 'require_index' true when 'pool_type' is 'avg'.")
pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
if pool_type == "max":
l_type = 'max_pool2d_with_index'
else:
l_type = "pool2d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
if pool_type == "max":
mask = helper.create_variable_for_type_inference(dtype)
outputs["Mask"] = mask
helper.append_op(
type=l_type,
inputs={"X": input},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (pool_out, mask) if require_index else pool_out
@templatedoc(op_type="pool3d")
def adaptive_pool3d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
"""
This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the height of
the feature, and W is the width of the feature. Parameters(pool_size) should contain
three elements which represent depth, height and width, respectively. Also the D, H and W
dimensions of output(Out) are the same as Parameter(pool_size). The output tensor shape
will be [N, C, pool_size[0], pool_size[1], pool_size[2]].
For average adaptive pool3d:
.. math::
dstart &= floor(i * D_{in} / D_{out})
dend &= ceil((i + 1) * D_{in} / D_{out})
hstart &= floor(j * H_{in} / H_{out})
hend &= ceil((j + 1) * H_{in} / H_{out})
wstart &= floor(k * W_{in} / W_{out})
wend &= ceil((k + 1) * W_{in} / W_{out})
Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
Args:
input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
N is batch size, C is the number of channels, D is the depth of the feature,
H is the height of the feature, and W is the width of the feature.
The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain three integers, (Depth, Height, Width).
pool_type: ${pooling_type_comment}
require_index (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: The output tensor of adaptive pooling result. The data type is same as input tensor.
Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
ValueError: 'pool_size' should be a list or tuple with length as 3.
Examples:
.. code-block:: python
# average adaptive pool3d
# suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
# output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
# of input data into l * m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive average pool performs calculations as follow:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(
name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='avg')
# max adaptive pool3d
# suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
# output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
# of input data into l * m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive max pool performs calculations as follow:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
# max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(
name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='max')
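# The line below is an illustrative sketch, not part of the original example:
# with require_index=True and pool_type='max', both the pooled output and the
# max-index mask are returned as a tuple.
pool_out, mask = fluid.layers.adaptive_pool3d(input=data, pool_size=[3, 3, 3], pool_type='max', require_index=True)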
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_pool3d')
check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
str(pool_type))
if pool_type == "avg" and require_index:
raise ValueError(
"invalid setting 'require_index' true when 'pool_type' is 'avg'.")
pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
if pool_type == "max":
l_type = 'max_pool3d_with_index'
else:
l_type = "pool3d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
if pool_type == "max":
mask = helper.create_variable_for_type_inference(dtype)
outputs["Mask"] = mask
helper.append_op(
type=l_type,
inputs={"X": input},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (pool_out, mask) if require_index else pool_out
def batch_norm(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False):
"""
**Batch Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\\\
moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum)
moving_mean is global mean and moving_var is global variance.
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
They are global (or running) statistics, usually obtained from a pre-trained model.
The training and testing (or inference) have the same behavior:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta
Note:
if build_strategy.sync_batch_norm=True, the batch_norm in network will use
sync_batch_norm automatically.
`is_test = True` can only be used in test or inference programs; `is_test` CANNOT be set to True in a train program. If you want to use global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
is_test (bool, Default False): A flag indicating whether it is in
test phase or not.
momentum(float|Variable, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Variable with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
moving_mean_name(str, Default None): The name of moving_mean which stores the global Mean. If it
is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm
will save global mean with the string.
moving_variance_name(str, Default None): The name of the moving_variance which stores the global Variance.
If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm
will save global variance with the string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
Returns:
A Variable holding Tensor which is the result after applying batch normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.batch_norm(input=hidden1)
.. code-block:: python
# batch_norm with momentum as Variable
import paddle.fluid as fluid
import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
def get_decay_momentum(momentum_init, decay_steps, decay_rate):
global_step = lr_scheduler._decay_step_counter()
momentum = fluid.layers.create_global_var(
shape=[1],
value=float(momentum_init),
dtype='float32',
# set persistable for save checkpoints and resume
persistable=True,
name="momentum")
div_res = global_step / decay_steps
decayed_momentum = momentum_init * (decay_rate**div_res)
fluid.layers.assign(decayed_momentum, momentum)
return momentum
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
momentum = get_decay_momentum(0.9, 1e5, 0.9)
hidden2 = fluid.layers.batch_norm(input=hidden1, momentum=momentum)
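# The line below is an illustrative sketch, not part of the original example:
# reuse pre-trained (global) statistics during training by setting use_global_stats=True.
hidden3 = fluid.layers.batch_norm(input=hidden1, use_global_stats=True)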
"""
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
helper = LayerHelper('batch_norm', **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'batch_norm')
dtype = helper.input_dtype()
has_reserve_space = False
if data_layout == 'NHWC':
flag = os.environ.get('FLAGS_cudnn_batchnorm_spatial_persistent')
if flag is not None and flag.lower() in ['true', '1']:
has_reserve_space = True
# use fp32 for bn parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = None
if has_reserve_space:
reserve_space = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.FP16, stop_gradient=True)
batch_norm_out = input if in_place else \
helper.create_variable_for_type_inference(dtype)
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return helper.append_activation(batch_norm_out)
def inplace_abn(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
act_alpha=1.0):
"""
**In-place Activation Batch Normalization Layer**
This layer calculates batch normalization and activation with in-place memory.
For batch normalization calculations, see `fluid.layers.batch_norm`.
For in-place activation batch normalization, see `In-Place Activated BatchNorm for
Memory-Optimized Training of DNNs <https://arxiv.org/abs/1712.02616>`_
`inplace_abn` only supports activation types `None`, `identity`, `leaky_relu` and
`elu` currently.
`inplace_abn` only supports data types `float32` and `float64` currently.
Note:
if build_strategy.sync_batch_norm=True, the batch_norm in network will use
sync_batch_norm automatically.
`is_test = True` can only be used in test or inference programs; `is_test` CANNOT be set to True in a train program. If you want to use global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
is_test (bool, Default False): A flag indicating whether it is in
test phase or not.
momentum(float|Variable, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Variable with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn.
If it is set to None or one attribute of ParamAttr, inplace_abn
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
is set to None, inplace_abn will save global mean with a random name, otherwise, inplace_abn
will save global mean with the string.
moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
If it is set to None, inplace_abn will save global variance with a random name, otherwise, inplace_abn
will save global variance with the string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
act_alpha(float, Default 1.0): When activation is in ['elu', 'identity', 'leaky_relu'],
in-place activated batch normalization will be used, and the alpha parameter for the
activation can be given by this parameter.
Returns:
A Variable holding Tensor which is the result after applying batch normalization and activation on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.inplace_abn(input=hidden1)
hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)
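# The line below is an illustrative sketch, not part of the original example:
# 'elu' is also accepted, with act_alpha as the ELU alpha parameter.
hidden4 = fluid.layers.inplace_abn(input=hidden3, act='elu', act_alpha=1.0)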
"""
assert act in [None, 'identity', 'leaky_relu', 'elu'], \
"inplace_abn only support act as None, 'identity', " \
"'leaky_relu', 'elu' currently"
assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
helper = LayerHelper('inplace_abn', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'inplace_abn')
dtype = helper.input_dtype()
has_reserve_space = False
if data_layout == 'NHWC':
flag = os.environ.get('FLAGS_cudnn_batchnorm_spatial_persistent')
if flag is not None and flag.lower() in ['true', '1']:
has_reserve_space = True
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = None
if has_reserve_space:
reserve_space = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.FP16, stop_gradient=True)
batch_norm_out = input
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats,
"activation": act,
"alpha": act_alpha,
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)
return batch_norm_out
def instance_norm(input,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
name=None):
"""
**Instance Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for
Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
\\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Args:
input(variable): The rank of input variable can be 2, 3, 4, 5.
The data type is float32 or float64.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
A Variable holding Tensor which is the result after applying instance normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.instance_norm(input=hidden1)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'instance_norm')
assert bias_attr is not False, "bias_attr should not be False in instance_norm."
helper = LayerHelper('instance_norm', **locals())
dtype = helper.input_dtype()
# use fp32 for in parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
channel_num = input_shape[1]
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr,
shape=param_shape,
dtype=dtype,
is_bias=True,
default_initializer=Constant(0.0))
# create output
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
instance_norm_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="instance_norm",
inputs={
"X": input,
"Scale": scale,
"Bias": bias,
},
outputs={
"Y": instance_norm_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
},
attrs={"epsilon": epsilon, })
return instance_norm_out
def data_norm(input,
act=None,
epsilon=1e-05,
param_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
slot_dim=-1,
sync_stats=False,
summary_decay_rate=0.9999999):
"""
**Data Normalization Layer**
This op can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Args:
input(variable): The input variable which is a LoDTensor.
act(string, Default None): Activation type, linear|relu|prelu|...
epsilon(float, Default 1e-05): A value added to the denominator for numerical stability.
param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
should do model average when model average is enabled.
slot_dim(int): The embedding dimension of one slot. Slot is a set of one specific feature. In pslib mode, we
distinguish feature ids by slot and pull their embeddings from parameter server (pslib). The first
place of the embedding is the historical show number (occurrence count of this feature id with a label 0).
If the input of this op is concatenated from slot-wise embeddings, and the show number is zero when this slot
is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
the show number and judge if the show number is zero. If so, we choose to skip normalization on this
embedding.
sync_stats(bool, Default False): When running with multiple GPU cards, using allreduce to sync the
summary messages.
summary_decay_rate(float, Default 0.9999999): The decay rate when updating summary.
Returns:
Variable: A tensor variable which is the result after applying data normalization on the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
hidden1 = fluid.data(name="hidden1", shape=[64, 200])
hidden2 = fluid.layers.data_norm(name="hidden2", input=hidden1)
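        The running statistics can also be seeded through ``param_attr`` when it
        is passed as a dict; the keys below mirror the ones this layer's
        implementation reads (``batch_size``, ``batch_sum``, ``batch_square``).
        A small sketch:

        .. code-block:: python

            import paddle.fluid as fluid
            hidden1 = fluid.data(name="hidden1", shape=[64, 200])
            hidden2 = fluid.layers.data_norm(
                name="hidden2",
                input=hidden1,
                param_attr={"batch_size": 1e4,
                            "batch_sum": 0.0,
                            "batch_square": 1e4})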
"""
helper = LayerHelper('data_norm', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
batch_size_default = 1e4
batch_sum_default = 0.0
batch_square_sum_default = 1e4
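    # A reading of these defaults: with batch_size = batch_square_sum = 1e4 and
    # batch_sum = 0, the initial running mean is 0 and the initial scale is
    # close to 1, so the layer starts out near an identity transform (the exact
    # update rule lives in the data_norm op).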
if param_attr and isinstance(param_attr, dict):
batch_size_default = param_attr.get("batch_size", 1e4)
batch_sum_default = param_attr.get("batch_sum", 0.0)
batch_square_sum_default = param_attr.get("batch_square", 1e4)
# create parameter
batch_size = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_size',
initializer=Constant(value=float(batch_size_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
batch_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_sum',
initializer=Constant(value=float(batch_sum_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
batch_square_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_square_sum',
initializer=Constant(value=float(batch_square_sum_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
means = helper.create_variable(dtype=dtype, stop_gradient=True)
scales = helper.create_variable(dtype=dtype, stop_gradient=True)
data_norm_out = input if in_place else helper.create_variable(dtype=dtype)
helper.append_op(
type="data_norm",
inputs={
"X": input,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
},
outputs={
"Y": data_norm_out,
"Means": means,
"Scales": scales,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
},
attrs={
"epsilon": epsilon,
"slot_dim": slot_dim,
"sync_stats": sync_stats,
"summary_decay_rate": summary_decay_rate
})
return helper.append_activation(data_norm_out)
@templatedoc()
def layer_norm(input,
scale=True,
shift=True,
begin_norm_axis=1,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
name=None):
"""
**Layer Normalization Layer**
The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
\\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Args:
input(Variable): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
begin_norm_axis(int, optional): The normalization will be performed along
dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
Default: 1.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
            omitted. If :attr:`shift` is True and :attr:`bias_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
name(str): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: ``Tensor`` indicating the normalized result, the data type is the same as ``input`` , and the return dimension is the same as ``input`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
np_x = np.random.random(size=(8, 3, 32, 32)).astype('float32')
output = exe.run(feed={"x": np_x}, fetch_list = [hidden1])
print(output)
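        A NumPy sketch of the formula above (the learnable ``g`` and ``b`` are
        omitted), useful for checking what is normalized for a given
        ``begin_norm_axis``:

        .. code-block:: python

            import numpy as np

            def layer_norm_ref(x, begin_norm_axis=1, epsilon=1e-05):
                # flatten the normalized dims, then normalize each row
                shape = x.shape
                flat = x.reshape(int(np.prod(shape[:begin_norm_axis])), -1)
                mu = flat.mean(axis=1, keepdims=True)
                var = flat.var(axis=1, keepdims=True)
                return ((flat - mu) / np.sqrt(var + epsilon)).reshape(shape)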
"""
assert in_dygraph_mode(
) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
helper = LayerHelper('layer_norm', **locals())
dtype = helper.input_dtype()
    # create input and parameters
inputs = {'X': input}
input_shape = input.shape
param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])]
if scale:
assert param_attr is not False, "param_attr should not be False when using scale."
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
inputs['Scale'] = scale
else:
if param_attr:
warnings.warn("param_attr is only available with scale is True.")
if shift:
assert bias_attr is not False, "bias_attr should not be False when using shift."
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
inputs['Bias'] = bias
else:
if bias_attr:
warnings.warn("bias_attr is only available with shift is True.")
# create output
mean_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
layer_norm_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": epsilon,
"begin_norm_axis": begin_norm_axis})
return helper.append_activation(layer_norm_out)
@templatedoc()
def group_norm(input,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
name=None):
"""
**Group Normalization Layer**
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
input(Variable): 4-D Tensor, the data type is float32 or float64.
groups(int): The number of groups that divided from channels, the data type
is int32.
epsilon(float, optional): The small value added to the variance to prevent
division by zero, the data type is float32. Default: 1e-05.
param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
attribute. If a bool type, only False is supported, which means there is no weight parameter.
Default: None, the default weight parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
attribute. If a bool type, only False is supported, which means there is no bias parameter.
Default: None, the default bias parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
act(str, optional): Activation to be applied to the output of group normalization.
data_layout(str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name (str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A 4-D Tensor has same data type and data format with `input`.
Raises:
ValueError: If `data_layout` is neither 'NCHW' nor 'NHWC'.
ValueError: If `groups` is greater than the number of input channels.
ValueError: If `groups` is less than 1.
ShapeError: If the param_attr(Scale) is not 1-D Tensor.
ShapeError: If the param_attr(Scale)'s first dimension size is not equal to the input channels.
ShapeError: If the bias_attr(Bias) is not 1-D Tensor.
ShapeError: If the bias_attr(Bias)'s first dimension size is not equal to the input channels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
x = fluid.layers.group_norm(input=data, groups=4)
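        A NumPy sketch of the normalization itself (scale and shift omitted),
        assuming the default NCHW layout:

        .. code-block:: python

            import numpy as np

            def group_norm_ref(x, groups, epsilon=1e-05):
                # split channels into groups and normalize over each group
                n, c, h, w = x.shape
                g = x.reshape(n, groups, c // groups, h, w)
                mu = g.mean(axis=(2, 3, 4), keepdims=True)
                var = g.var(axis=(2, 3, 4), keepdims=True)
                return ((g - mu) / np.sqrt(var + epsilon)).reshape(n, c, h, w)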
"""
helper = LayerHelper('group_norm', **locals())
dtype = helper.input_dtype()
    # create input and parameters
inputs = {'X': input}
input_shape = input.shape
if data_layout != 'NCHW' and data_layout != 'NHWC':
raise ValueError(
"Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
+ data_layout + " but only NCHW or NHWC supported.")
channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
param_shape = [channel_num]
if param_attr:
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
inputs['Scale'] = scale
if bias_attr:
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
inputs['Bias'] = bias
# create output
mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
group_norm_out = helper.create_variable(dtype=dtype)
helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={
"epsilon": epsilon,
"groups": groups,
"data_layout": data_layout
})
return helper.append_activation(group_norm_out)
@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
"""
**Spectral Normalization Layer**
This operation calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. Output tensor will be in same shape with input tensor.
Calculations are showed as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
        Here H is the size of the :attr:`dim`-th dimension of the input weights,
        and W is the product of the remaining dimensions.
Step 2:
:attr:`power_iters` should be a positive integer, do following
calculations with U and V for :attr:`power_iters` rounds. Calculations
as follows:
.. math::
\mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
        \mathbf{u} := \\frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Args:
weight(${weight_type}): ${weight_comment}
dim(int): ${dim_comment}
power_iters(int): ${power_iters_comment}
eps(float): ${eps_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: A tensor variable of weight parameters after spectral normalization.
The data type and shape is same as input tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
weight = fluid.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2)
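        A NumPy sketch of the three steps above; it draws fresh U and V on every
        call, so it only illustrates the computation rather than reproducing the
        layer's persistent U/V parameters:

        .. code-block:: python

            import numpy as np

            def spectral_norm_ref(weight, dim=0, power_iters=1, eps=1e-12):
                # reshape so the `dim` axis becomes the rows: [H, W]
                w = np.moveaxis(weight, dim, 0).reshape(weight.shape[dim], -1)
                u = np.random.normal(size=w.shape[0])
                v = np.random.normal(size=w.shape[1])
                for _ in range(power_iters):
                    v = w.T.dot(u)
                    v /= np.linalg.norm(v) + eps
                    u = w.dot(v)
                    u /= np.linalg.norm(u) + eps
                # sigma estimates the largest singular value of w
                sigma = u.dot(w.dot(v))
                return weight / sigma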
"""
helper = LayerHelper('spectral_norm', **locals())
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
'spectral_norm')
check_type(dim, 'dim', int, 'spectral_norm')
check_type(power_iters, 'power_iters', int, 'spectral_norm')
check_type(eps, 'eps', float, 'spectral_norm')
dtype = weight.dtype
    # create input and parameters
inputs = {'Weight': weight}
input_shape = weight.shape
h = input_shape[dim]
w = np.prod(input_shape) // h
u = helper.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=dtype,
default_initializer=Normal(0., 1.))
u.stop_gradient = True
inputs['U'] = u
v = helper.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=dtype,
default_initializer=Normal(0., 1.))
inputs['V'] = v
v.stop_gradient = True
# create output
out = helper.create_variable(dtype=dtype)
helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": dim,
"power_iters": power_iters,
"eps": eps,
})
return out
def conv2d_transpose(input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCHW'):
"""
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
H is the height of the feature, and W is the width of the feature.
Parameters(dilations, strides, paddings) are two elements. These two elements
represent height and width, respectively. The details of convolution transpose
layer, please refer to the following explanation and references
`therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a 4-D Tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]
Note:
The conv2d_transpose can be seen as the backward of the conv2d. For conv2d,
when stride > 1, conv2d maps multiple input shape to the same output shape,
so for conv2d_transpose, when stride > 1, input shape maps multiple output shape.
If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
else, the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`,
conv2d_transpose can compute the kernel size automatically.
Args:
input(Variable): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
its data type is float32 or float64.
num_filters(int): The number of the filter. It is as same as the output
image channel.
        output_size(int|tuple, optional): The output image size. If output size is a
            tuple, it must contain two integers, (image_height, image_width). If it is
            None, the output size is calculated from filter_size, padding, and stride.
            If output_size and filter_size are specified at the same time, they
            should follow the formula above. Default: None. output_size and filter_size
            should not be None at the same time.
        filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_height, filter_size_width).
            Otherwise, filter_size_height = filter_size_width = filter_size. If it is
            None, the filter size is calculated from output_size. Default: None. filter_size and
            output_size should not be None at the same time.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds
`dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a
string, either 'VALID' or 'SAME' supported, which is the padding algorithm.
If `padding` is a tuple or list, it could be in three forms:
`[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and
when `data_format` is `'NCHW'`,
`padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NHWC'`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups = 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv2d_transpose, whose
data type is the same with input and shape is (num_batches, channels, out_h,
out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor variable
storing the transposed convolution result, and if act is not None, the
tensor variable storing transposed convolution and non-linearity activation
result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
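        The :math:`H^\prime_{out}` / :math:`W^\prime_{out}` relation above can be
        checked with a small helper (a sketch of the formula, applied per spatial
        dimension):

        .. code-block:: python

            def deconv_dim(in_dim, stride, pad_begin, pad_end, dilation, k):
                # (in - 1) * stride - pads + dilation * (k - 1) + 1
                return (in_dim - 1) * stride - pad_begin - pad_end + dilation * (k - 1) + 1

            # a 32x32 input, 3x3 kernel, stride 1, no padding -> 34x34 output
            assert deconv_dim(32, 1, 0, 0, 1, 3) == 34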
"""
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received "
+ data_format + " but only NCHW or NHWC supported.")
input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
op_type = 'conv2d_transpose'
if (input_channel == groups and num_filters == input_channel and
not use_cudnn):
op_type = 'depthwise_conv2d_transpose'
helper = LayerHelper(op_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Variable")
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
else:
padding = utils.convert_to_list(padding, 2, 'padding')
padding = [padding[0], padding[0], padding[1], padding[1]]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size]
h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]
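        # Invert the H'_out / W'_out relation from the docstring to infer the
        # kernel size that would produce the requested output_size.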
filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] +
padding[1] - 1) // dilation[0] + 1
filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] +
padding[3] - 1) // dilation[1] + 1
filter_size = [filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(filter_size, 2,
'conv2d_transpose.filter_size')
if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'Input': [input],
'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
out = helper.append_activation(pre_act)
return out
def conv3d_transpose(input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCDHW'):
"""
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
    is the width of the feature. Parameters(dilations, strides, paddings) are
    three elements. These three elements represent depth, height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a Tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]
Note:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
        If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out},
        W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Args:
input(Variable): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
of input is float32 or float64.
num_filters(int): The number of the filter. It is as same as the output
image channel.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain three integers, (image_depth, image_height, image_width). This
parameter only works when filter_size is None. If output_size and filter_size are
            specified at the same time, they should follow the formula above. Default: None.
            output_size and filter_size should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
            filter_size_width = filter_size. If it is None, the filter size is calculated
            from output_size. Default: None. filter_size and output_size should not be
None at the same time.
padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
Default: stride = 1.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups=1
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d_transpose, whose data
type is the same with input and shape is (num_batches, channels, out_d, out_h,
out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
variable storing the transposed convolution result, and if act is not None, the tensor
variable storing transposed convolution and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
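        When ``filter_size`` is None, the kernel size can instead be inferred
        from ``output_size`` (a sketch, assuming the default NCDHW layout; the
        name ``data3d`` is just an example):

        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(name='data3d', shape=[None, 3, 12, 32, 32], dtype='float32')
            out = fluid.layers.conv3d_transpose(
                input=data, num_filters=2, output_size=[24, 64, 64], stride=2)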
"""
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
if data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
+ data_format + " but only NCDHW or NDHWC supported.")
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Variable")
input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
-1]
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
else:
padding = utils.convert_to_list(padding, 3, 'padding')
padding = [
padding[0], padding[0], padding[1], padding[1], padding[2],
padding[2]
]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size, output_size]
d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]
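        # Invert the D'_out / H'_out / W'_out relations from the docstring to
        # infer the kernel size that would produce the requested output_size.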
filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] +
padding[1] - 1) // dilation[0] + 1
filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] +
padding[3] - 1) // dilation[1] + 1
filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] +
padding[5] - 1) // dilation[2] + 1
filter_size = [filter_size_d, filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(filter_size, 3,
'conv3d_transpose.filter_size')
if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
if data_format == 'NCDHW':
data_format = 'NCHW'
if data_format == 'NDHWC':
data_format = 'NHWC'
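    # After the remap, the layout is expressed with the 2-D style names
    # ('NCHW'/'NHWC'), which is what the op's 'data_format' attribute and the
    # bias-append branch below operate on.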
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=l_type,
inputs={'Input': [input],
'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
out = helper.append_activation(pre_act)
return out
def reduce_sum(input, dim=None, keep_dim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of summation operation on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Raises:
        TypeError: If the output data type is different from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_sum(x) # [3.5]
fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6]
fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6]
fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]
"""
if dim is not None and not isinstance(dim, list):
dim = [dim]
if in_dygraph_mode():
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
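    # dim of None or [] means the whole tensor is reduced (reduce_all=True);
    # [0] only serves as a placeholder value for the op's 'dim' attribute.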
attrs = {
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
}
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def reduce_mean(input, dim=None, keep_dim=False, name=None):
"""
Computes the mean of the input tensor's elements along the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the mean is computed. If
`None`, compute the mean over all elements of :attr:`input`
and return a variable with a single element, otherwise it
must be in the range :math:`[-rank(input), rank(input))`. If
:math:`dim[i] < 0`, the dimension to reduce is
:math:`rank(input) + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of average on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Raises:
        TypeError: If the output data type is different from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_mean(x) # [0.4375]
fluid.layers.reduce_mean(x, dim=0) # [0.15, 0.25, 0.55, 0.8]
fluid.layers.reduce_mean(x, dim=-1) # [0.475, 0.4]
fluid.layers.reduce_mean(x, dim=1, keep_dim=True) # [[0.475], [0.4]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5]
fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0]
"""
if dim is not None and not isinstance(dim, list):
dim = [dim]
if in_dygraph_mode():
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
return core.ops.reduce_mean(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
attrs = {
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
}
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_mean')
helper = LayerHelper('reduce_mean', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_mean',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def reduce_max(input, dim=None, keep_dim=False, name=None):
"""
Computes the maximum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of maximum on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_max(x) # [0.9]
fluid.layers.reduce_max(x, dim=0) # [0.2, 0.3, 0.6, 0.9]
fluid.layers.reduce_max(x, dim=-1) # [0.9, 0.7]
fluid.layers.reduce_max(x, dim=1, keep_dim=True) # [[0.9], [0.7]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0]
fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0]
"""
helper = LayerHelper('reduce_max', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_max',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_min(input, dim=None, keep_dim=False, name=None):
"""
Computes the minimum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, result of minimum on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_min(x) # [0.1]
fluid.layers.reduce_min(x, dim=0) # [0.1, 0.2, 0.5, 0.7]
fluid.layers.reduce_min(x, dim=-1) # [0.2, 0.1]
fluid.layers.reduce_min(x, dim=1, keep_dim=True) # [[0.2], [0.1]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0]
fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0]
"""
helper = LayerHelper('reduce_min', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_min',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_prod(input, dim=None, keep_dim=False, name=None):
"""
Computes the product of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the product is performed. If
:attr:`None`, multiply all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, result of product on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_prod(x) # [0.0002268]
fluid.layers.reduce_prod(x, dim=0) # [0.02, 0.06, 0.3, 0.63]
fluid.layers.reduce_prod(x, dim=-1) # [0.027, 0.0084]
fluid.layers.reduce_prod(x, dim=1,
keep_dim=True) # [[0.027], [0.0084]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0]
fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
"""
helper = LayerHelper('reduce_prod', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_prod',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_all(input, dim=None, keep_dim=False, name=None):
"""
This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
        dim (list|int, optional): The dimension along which the logical and is computed.
If :attr:`None`, compute the logical and over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically. The default value is None.
Returns:
        Variable: The reduced tensor variable with ``logical and`` in the given dims; the output data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# x is a bool Tensor variable with following elements:
# [[True, False]
# [True, True]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
x = layers.cast(x, 'bool')
out = layers.reduce_all(x) # False
out = layers.reduce_all(x, dim=0) # [True, False]
out = layers.reduce_all(x, dim=-1) # [False, True]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
helper = LayerHelper('reduce_all', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_all',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_any(input, dim=None, keep_dim=False, name=None):
"""
This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
        dim (list|int, optional): The dimension along which the logical or is computed.
            If :attr:`None`, compute the logical or over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically. The default value is None.
    Returns:
        Variable: The reduced tensor variable with ``logical or`` in the given dims; the output data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# x is a bool Tensor variable with following elements:
# [[True, False]
# [False, False]]
x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
x = layers.cast(x, 'bool')
out = layers.reduce_any(x) # True
out = layers.reduce_any(x, dim=0) # [True, False]
out = layers.reduce_any(x, dim=-1) # [True, False]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_any(x, dim=1,
keep_dim=True) # [[True], [False]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
helper = LayerHelper('reduce_any', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_any',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def split(input, num_or_sections, dim=-1, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
input (Variable): The input variable which is an N-D Tensor or LoDTensor, data type being float32, float64, int32 or int64.
num_or_sections (int|list|tuple): If :attr:`num_or_sections` is an integer,
then the integer indicates the number of equal sized sub-Tensors
that the Tensor will be divided into. If :attr:`num_or_sections`
is a list or tuple, the length of it indicates the number of
sub-Tensors and the elements in it indicate the sizes of sub-Tensors'
            :attr:`dim` dimension, in order. The length of the list must not be larger than the Tensor's size along :attr:`dim` .
        dim (int32|Variable, optional): A scalar with type ``int32`` or a ``Tensor`` with shape [1] and type ``int32``. The dimension along which to split. If :math:`dim < 0`, the
dimension to split along is :math:`rank(input) + dim`. Default is -1.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Variable): The list of segmented Tensor variables.
Raises:
TypeError: num_or_sections is not int, list or tuple.
TypeError: dim is not int or Variable.
Example:
.. code-block:: python
import paddle.fluid as fluid
# input is a variable which shape is [3, 9, 5]
input = fluid.data(
name="input", shape=[3, 9, 5], dtype="float32")
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
# x0.shape [3, 3, 5]
# x1.shape [3, 3, 5]
# x2.shape [3, 3, 5]
x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
# x0.shape [3, 2, 5]
# x1.shape [3, 3, 5]
# x2.shape [3, 4, 5]
x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
# x0.shape [3, 2, 5]
# x1.shape [3, 3, 5]
# x2.shape [3, 4, 5]
"""
if in_dygraph_mode():
num = None
attrs = ()
if isinstance(dim, Variable):
dim = dim.numpy()
assert dim.shape == (1,
), "dim of type Variable should have shape [1]"
dim = dim[0]
dim = (len(input.shape) + dim) if dim < 0 else dim
attrs += ('axis', dim)
if isinstance(num_or_sections, int):
num = num_or_sections
attrs += ('num', num_or_sections)
elif isinstance(num_or_sections, (list, tuple)):
num = len(num_or_sections)
if utils._contain_var(num_or_sections):
raise TypeError(
"The type of 'num_or_sections' in split must be int or list[int] or tuple[int] in Dygraph mode, but "
"received %s, which contains Variable." %
(type(num_or_sections)))
else:
attrs += ('sections', list(num_or_sections))
else:
raise TypeError(
"The type of 'num_or_sections' in split must be int or list in Dygraph mode, but "
"received %s." % (type(num_or_sections)))
return core.ops.split(input, num, *attrs)
if not isinstance(num_or_sections, (int, list, tuple)):
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or "
"tuple, but received %s." % (type(num_or_sections)))
if not isinstance(dim, (int, Variable)):
raise TypeError(
"The type of 'dim' in split must be int or Variable, but "
"received %s." % (type(dim)))
helper = LayerHelper('split', **locals())
input_shape = input.shape
inputs = {'X': input}
attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}
def _get_SectionsTensorList(one_list):
tensor_list = []
unk_dim_idx = -1
for idx, dim_size in enumerate(one_list):
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
tensor_list.append(dim_size)
else:
assert (isinstance(dim_size, int))
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one value of 'num_or_section' in split can "
"be -1. But received num_or_section[%d] is also -1." %
idx)
unk_dim_idx = idx
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
tensor_list.append(temp_out)
return tensor_list
if isinstance(dim, Variable):
dim.stop_gradient = True
inputs['AxisTensor'] = dim
else:
dim = (len(input_shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
if isinstance(num_or_sections, int):
assert num_or_sections > 1, 'num_or_sections must be more than 1.'
if isinstance(dim, int) and input_shape[dim] > 0:
assert input_shape[dim] % num_or_sections == 0, \
"The input's size along the split dimension " \
"must be evenly divisible by Attr(num_or_sections). " \
"But %d is not evenly divisible by %d. " % (input_shape[dim], num_or_sections)
num = num_or_sections
else:
if isinstance(dim, int) and input_shape[dim] > 0:
assert len(num_or_sections) <= input_shape[
dim], 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = list(
map(lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections))
if utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections)
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
return outs
def l2_normalize(x, axis, epsilon=1e-12, name=None):
"""
This op normalizes `x` along dimension `axis` using an L2
norm. For a 1-D tensor (`axis` is fixed to 0), this layer computes
.. math::
y = \\frac{x}{ \\sqrt{\\sum {x^2} + \\epsilon }}
For `x` with more dimensions, this layer independently normalizes each 1-D
slice along dimension `axis`.
Args:
x(Variable|list): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
axis(int): The axis on which to apply normalization. If `axis < 0`, \
the dimension to normalize is rank(X) + axis. -1 is the
last dimension.
epsilon(float): The epsilon value is used to avoid division by zero, \
the default value is 1e-12.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output has the same shape and data type with `x`.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[2,3])
output = fluid.layers.l2_normalize(x=input,axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3).astype("float32")
print(input_data)
# [[0.5171216 0.12704141 0.56018186]
# [0.93251234 0.5382788 0.81709313]]
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data)
# [array([[0.48496857, 0.22970329, 0.56545246],
# [0.8745316 , 0.9732607 , 0.82478094]], dtype=float32)]
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.l2_normalize(x=input, axis=-1)
print(output.numpy())
# [[0.66907585 0.16437206 0.7247892 ]
# [0.6899054 0.3982376 0.6045142 ]]
"""
if len(x.shape) == 1:
axis = 0
helper = LayerHelper("l2_normalize", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
norm = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="norm",
inputs={"X": x},
outputs={"Out": out,
"Norm": norm},
attrs={
"axis": 1 if axis is None else axis,
"epsilon": epsilon,
})
return out
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
"""
Applies matrix multiplication to two tensors.
Currently, the input tensors' rank can be any, but when the rank of any
input is bigger than 3, the two inputs' ranks should be equal.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
:math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
:math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
:math:`[1, D]` in transposed form.
- After transpose, the two tensors are 2-D or n-D and matrix multiplication
performs in the following way.
- If both are 2-D, they are multiplied like conventional matrices.
- If either is n-D, it is treated as a stack of matrices residing in the
last two dimensions and a batched matrix multiply supporting broadcast
applies on the two tensors.
Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
alpha (float): The scale of output. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], y: [B, ..., K, N]
# fluid.layers.matmul(x, y) # out: [B, ..., M, N]
# x: [B, M, K], y: [B, K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# x: [B, M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# x: [M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [M, N]
# x: [B, M, K], y: [K]
# fluid.layers.matmul(x, y) # out: [B, M]
# x: [K], y: [K]
# fluid.layers.matmul(x, y) # out: [1]
# x: [M], y: [N]
# fluid.layers.matmul(x, y, True, True) # out: [M, N]
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
out = fluid.layers.matmul(x, y, True, True)
"""
return paddle.matmul(x, y, transpose_x, transpose_y, alpha, name)
def topk(input, k, name=None):
"""
This OP is used to find values and indices of the k largest entries
for the last dimension.
If the input is a 1-D Tensor, finds the k largest entries and outputs
their values and indices.
If the input is a Tensor with higher rank, this operator computes the top k
entries along the last dimension.
.. code-block:: text
Case 1:
Input:
input.shape = [3, 4]
input.data = [[5, 4, 2, 3],
[9, 7, 10, 25],
[6, 2, 10, 1]]
k = 2
Output:
The first output:
values.shape = [3, 2]
values.data = [[5, 4],
[25, 10],
[10, 6]]
The second output:
indices.shape = [3, 2]
indices.data = [[0, 1],
[3, 2],
[2, 0]]
Args:
input(Variable): The input tensor. Support data types: float32, float64.
k(int | Variable): The number of top elements to look for along the last dimension
of input tensor.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Values (Variable): Input tensor's k largest elements along each last dimensional slice. The dimension is: :math:`input.shape[:-1]+[k]`.
Indices (Variable): Indices of the k largest elements along the last dimension of input. The dimension is the same as values.
Raises:
ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape[None, 13, 5], top5_indices.shape=[None, 13, 5]
# 1D Tensor
input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[None, 5], top5_indices.shape=[None, 5]
# k=Variable
input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]
"""
if in_dygraph_mode():
_k = k.numpy().item(0) if isinstance(k, Variable) else k
out, indices = core.ops.top_k(input, 'k', _k)
out.stop_gradient = True
indices.stop_gradient = True
return out, indices
inputs = {"X": [input]}
attrs = {}
if isinstance(k, Variable):
inputs['K'] = [k]
else:
attrs = {'k': k}
helper = LayerHelper("top_k", **locals())
values = helper.create_variable_for_type_inference(dtype=input.dtype)
indices = helper.create_variable_for_type_inference(dtype="int64")
helper.append_op(
type="top_k",
inputs=inputs,
outputs={"Out": [values],
"Indices": [indices]},
attrs=attrs)
values.stop_gradient = True
indices.stop_gradient = True
return values, indices
def ctc_greedy_decoder(input,
blank,
input_length=None,
padding_value=0,
name=None):
"""
This op is used to decode sequences by greedy policy by the following steps:
1. Get the index of the maximum value for each row in input, i.e.
numpy.argmax(input, axis=1).
2. For each sequence in result of step1, merge repeated tokens between two
blanks and delete all blanks.
This op is implemented in two modes: lod and padding, either of them can be used.
The input can be either LoDTensor or Tensor, corresponding to lod and padding
mode respectively.
A simple example as below:
.. code-block:: text
Given:
(1) for lod mode:
input.data = [[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1],
[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]
input.lod = [[4, 4]]
Computation:
step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
[[0], [2], [1], [0]]
step2: merge repeated tokens and remove blank which is 0. Then we get first output sequence:
[[2], [1]]
Finally:
output.data = [[2],
[1],
[3]]
output.lod = [[2, 1]]
(2) for padding mode:
input.data = [[[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1]],
[[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]]
input_length.data = [[4], [4]]
input.shape = [2, 4, 4]
step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
[[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
step2: Change the argmax result to use padding mode, then argmax result is
[[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
step3: Apply ctc_align to padding argmax result, padding_value is 0
Finally:
output.data = [[2, 1, 0, 0],
[3, 0, 0, 0]]
output_length.data = [[2], [1]]
Parameters:
input(Variable): the probabilities of variable-length sequences. When in lod mode,
it is a 2-D LoDTensor with LoD information. Its shape is [Lp, num_classes + 1]
where Lp is the sum of all input sequences' lengths and
num_classes is the true number of classes (not including the blank label). When in padding mode,
it is a 3-D Tensor with padding. Its shape is [batch_size, N, num_classes + 1].
The data type can be float32 or float64.
blank(int): the blank label index of Connectionist Temporal
Classification (CTC) loss, which is in the half-open
interval [0, num_classes + 1).
input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
It is used for padding mode. In lod mode, input_length is None.
padding_value(int): padding value.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
For lod mode, returns the result of CTC greedy decoder, 2-D LoDTensor, shape is [Lp, 1], \
data type is int64. 'Lp' is the sum of all output sequences' length. If all the sequences \
in result were empty, the result LoDTensor will be [-1] with empty \
LoD [[]].
For padding mode, returns a tuple of (output, output_length), which are described below:
output, 2-D Tensor, shape is [batch_size, N], data type is int64.
output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
each sequence of output for padding mode.
Return type:
For lod mode: Variable
For padding mode: tuple of two Variables (output, output_length).
Examples:
.. code-block:: python
# for lod mode
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
# for padding mode
x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
input_length=x_pad_len)
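The two decoding steps described above can be traced with plain numpy for a
single sequence. A minimal illustrative sketch, assuming ``blank=0`` and using
the first lod-mode sequence from the text example (``probs`` is a placeholder name):
.. code-block:: python
import numpy as np
probs = np.array([[0.6, 0.1, 0.3, 0.1],
                  [0.3, 0.2, 0.4, 0.1],
                  [0.1, 0.5, 0.1, 0.3],
                  [0.5, 0.1, 0.3, 0.1]])
ids = probs.argmax(axis=1)  # step 1: [0, 2, 1, 0]
decoded = [int(t) for i, t in enumerate(ids) if t != 0 and (i == 0 or t != ids[i - 1])]
# decoded == [2, 1], the first output sequence in the example above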
"""
helper = LayerHelper("ctc_greedy_decoder", **locals())
_, topk_indices = topk(input, k=1)
# ctc align op
ctc_out = helper.create_variable_for_type_inference(dtype="int64")
if input_length is None:
helper.append_op(
type="ctc_align",
inputs={"Input": [topk_indices]},
outputs={"Output": [ctc_out]},
attrs={"merge_repeated": True,
"blank": blank})
return ctc_out
else:
ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
ctc_input = squeeze(topk_indices, [2])
helper.append_op(
type="ctc_align",
inputs={"Input": [ctc_input],
"InputLength": [input_length]},
outputs={"Output": [ctc_out],
"OutputLength": [ctc_out_len]},
attrs={
"merge_repeated": True,
"blank": blank,
"padding_value": padding_value
})
return ctc_out, ctc_out_len
def transpose(x, perm, name=None):
"""
Permute the data dimensions of `input` according to `perm`.
The `i`-th dimension of the returned tensor will correspond to the
perm[i]-th dimension of `input`.
Args:
x (Variable): The input Tensor. It is a N-D Tensor of data types float32, float64, int32.
perm (list): Permute the input according to the data of perm.
name (str): The name of this layer. It is optional.
Returns:
Variable: A transposed n-D Tensor, with data type being float32, float64, int32, int64.
For Example:
.. code-block:: text
x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]]
[[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
shape(x) = [2,3,4]
# Example 1
perm0 = [1,0,2]
y_perm0 = [[[ 1 2 3 4] [13 14 15 16]]
[[ 5 6 7 8] [17 18 19 20]]
[[ 9 10 11 12] [21 22 23 24]]]
shape(y_perm0) = [3,2,4]
# Example 2
perm1 = [2,1,0]
y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
[[ 2 14] [ 6 18] [10 22]]
[[ 3 15] [ 7 19] [11 23]]
[[ 4 16] [ 8 20] [12 24]]]
shape(y_perm1) = [4,3,2]
Examples:
.. code-block:: python
# use append_batch_size=False to avoid prepending extra
# batch size in shape
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3, 4],
dtype='float32', append_batch_size=False)
x_transposed = fluid.layers.transpose(x, perm=[1, 0, 2])
print(x_transposed.shape)
#(3L, 2L, 4L)
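The permutation rule (the i-th output dimension comes from dimension perm[i]
of the input) matches numpy.transpose. A minimal illustrative sketch that
reproduces the two text examples above:
.. code-block:: python
import numpy as np
x_np = np.arange(1, 25).reshape(2, 3, 4)
y_perm0 = np.transpose(x_np, (1, 0, 2))  # shape (3, 2, 4), as in Example 1
y_perm1 = np.transpose(x_np, (2, 1, 0))  # shape (4, 3, 2), as in Example 2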
"""
if in_dygraph_mode():
out, _ = core.ops.transpose2(x, 'axis', perm)
return out
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
check_type(perm, 'perm', list, 'transpose')
if len(perm) != len(x.shape):
raise ValueError(
"Input(perm) is the permutation of dimensions of Input(x), "
"its length should be equal to dimensions of Input(x), "
"but received dimension of Input(x) is %s, "
"the length of Input(perm) is %s." % (len(x.shape), len(perm)))
for idx, dim in enumerate(perm):
if dim >= len(x.shape):
raise ValueError(
"Each element in Input(perm) should be less than Input(x)'s dimension, "
"but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
"dimension %d." % (idx, perm[idx], len(x.shape)))
helper = LayerHelper('transpose', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='transpose2',
inputs={'X': [x]},
outputs={'Out': [out],
'XShape': [x_shape]},
attrs={'axis': perm})
return out
def im2sequence(input,
filter_size=1,
stride=1,
padding=0,
input_image_size=None,
out_stride=1,
name=None):
"""
Extracts image patches from the input tensor to form a tensor of shape
{input.batch_size * output_height * output_width, filter_size_height *
filter_size_width * input.channels}. This op uses filters to scan images
and converts these images to sequences. After expanding, the number of time steps is
output_height * output_width for an image, in which output_height and
output_width are calculated by below equation:
.. math::
output\_height = 1 + \
(padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height - 1) / stride\_height \\\\
output\_width = 1 + \
(padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width - 1) / stride\_width
And the dimension of each time step is filter_size_height * filter_size_width * input.channels.
Parameters:
input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
stride(int32 | List[int32]): The stride size. If stride is a List, it must
contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square :math:`[stride\_size, stride\_size]` . Default is 1.
padding(int32 | List[int32]): The padding size. If padding is a List, it can
contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]` to indicate
paddings of four direction. Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
padding_up = padding_down = padding_height and
padding_left = padding_right = padding_width. Otherwise, a scalar padding means
padding_up = padding_down = padding_left = padding_right = padding.
Default is 0.
input_image_size(Variable, optional): the input that contains the real sizes of the images. Its dim
is :math:`[batchsize, 2]` . It is only used for batch inference when not None. Default is None.
out_stride(int32 | List[int32]): The scaling of image through CNN. It is valid only when input_image_size is not None.
If out_stride is List, it must contain two integers,
:math:`[out\_stride\_height, out\_stride\_width]` . Otherwise,
the out_stride_height = out_stride_width = out_stride. Default is 1.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.
Return Type: Variable
Examples:
.. code-block:: text
Given:
x = [[[[ 6. 2. 1.]
[ 8. 3. 5.]
[ 0. 2. 6.]]
[[ 2. 4. 4.]
[ 6. 3. 0.]
[ 6. 4. 7.]]]
[[[ 6. 7. 1.]
[ 5. 7. 9.]
[ 2. 4. 8.]]
[[ 1. 2. 1.]
[ 1. 3. 5.]
[ 9. 0. 8.]]]]
x.dims = {2, 2, 3, 3}
And:
filter = [2, 2]
stride = [1, 1]
padding = [0, 0]
Then:
output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.]
[ 2. 1. 3. 5. 4. 4. 3. 0.]
[ 8. 3. 0. 2. 6. 3. 6. 4.]
[ 3. 5. 2. 6. 3. 0. 4. 7.]
[ 6. 7. 5. 7. 1. 2. 1. 3.]
[ 7. 1. 7. 9. 2. 1. 3. 5.]
[ 5. 7. 2. 4. 1. 3. 9. 0.]
[ 7. 9. 4. 8. 3. 5. 0. 8.]]
output.dims = {8, 8}
output.lod = [[4, 4]]
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32],
dtype='float32')
output = fluid.layers.im2sequence(
input=data, stride=[1, 1], filter_size=[2, 2])
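The output size formula above can be evaluated directly. A minimal
illustrative sketch for the 3 x 3 input of the text example (same filter,
stride and padding):
.. code-block:: python
input_h, input_w = 3, 3
filter_h, filter_w = 2, 2
stride_h, stride_w = 1, 1
pad_up = pad_down = pad_left = pad_right = 0
out_h = 1 + (pad_up + pad_down + input_h - filter_h + stride_h - 1) // stride_h
out_w = 1 + (pad_left + pad_right + input_w - filter_w + stride_w - 1) // stride_w
# out_h == 2 and out_w == 2, so each image expands to 2 * 2 = 4 time steps,
# which matches output.lod = [[4, 4]] in the text example.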
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
if isinstance(filter_size, int):
filter_size = [filter_size, filter_size]
if isinstance(stride, int):
stride = [stride, stride]
if isinstance(padding, int):
padding = [padding, padding]
if len(padding) == 2:
padding.append(padding[0])
padding.append(padding[1])
inputs = {"X": input}
attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
if input_image_size:
if isinstance(out_stride, int):
out_stride = [out_stride, out_stride]
inputs["Y"] = input_image_size
attrs["out_stride"] = out_stride
helper = LayerHelper('im2sequence', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
"""
${comment}
Args:
input (${x_type}): ${x_comment}.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc.
act (str): Non-linear activation to be applied to output variable.
Returns:
${out_comment}.
Examples:
>>> # for LodTensor inputs
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[9, 16],
>>> dtype='float32', lod_level=1)
>>> out = fluid.layers.row_conv(input=x, future_context_size=2)
>>> # for Tensor inputs
>>> x = fluid.data(name='x', shape=[9, 4, 16], dtype='float32')
>>> out = fluid.layers.row_conv(input=x, future_context_size=2)
"""
helper = LayerHelper('row_conv', **locals())
dtype = helper.input_dtype()
filter_shape = [future_context_size + 1, input.shape[-1]]
filter_param = helper.create_parameter(
attr=helper.param_attr, shape=filter_shape, dtype=dtype)
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [filter_param]},
outputs={'Out': [out]})
return helper.append_activation(out)
@templatedoc()
def multiplex(inputs, index):
"""
Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.
If the input of this OP contains :math:`m` Tensors, where :math:`I_{i}` means the i-th input Tensor and :math:`i` is in the range :math:`[0,m)` .
And :math:`O` means the output, where :math:`O[i]` means the i-th row of the output, then the output satisfies that :math:`O[i] = I_{index[i]}[i]` .
For Example:
.. code-block:: text
Given:
inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
[[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
[[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
[[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]
index = [[3],[0],[1],[2]]
out = [[3,0,3,4], # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
[0,1,3,4], # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
[1,2,4,2], # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
[2,3,3,4]] # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]
Args:
inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2.
index (Variable): Used to select some rows in the input Tensor to construct an index of the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors.
Returns:
Variable(Tensor): Output of multiplex OP, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x1 = fluid.data(name='x1', shape=[None, 2], dtype='float32')
x2 = fluid.data(name='x2', shape=[None, 2], dtype='float32')
index = fluid.data(name='index', shape=[None, 1], dtype='int32')
out = fluid.layers.multiplex(inputs=[x1, x2], index=index)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
index = np.array([[1], [0]]).astype(np.int32)
res = exe.run(fluid.default_main_program(), feed={'x1':img1, 'x2':img2, 'index':index}, fetch_list=[out])
print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
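The selection rule :math:`O[i] = I_{index[i]}[i]` can be mimicked with numpy
to sanity-check the fetched result above. A minimal illustrative sketch using
the same ``img1``, ``img2`` and ``index`` arrays:
.. code-block:: python
import numpy as np
img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
index = np.array([[1], [0]]).astype(np.int32)
stacked = np.stack([img1, img2])  # shape [m, M, d]
expected = np.array([stacked[index[i, 0], i] for i in range(index.shape[0])])
# expected == [[5., 6.], [3., 4.]], matching the result printed above.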
"""
helper = LayerHelper('multiplex', **locals())
if not isinstance(inputs, list) or len(inputs) < 2:
raise ValueError("inputs should be a list object that contains at least "
"2 elements.")
out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op(
type='multiplex',
inputs={'X': inputs,
'Ids': index},
outputs={'Out': [out]})
return out
def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
"""
This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
For each instance, it computes the smooth L1 loss element by element first
and then sums all the losses. So the shape of output Variable is
[batch_size, 1].
Args:
x (Variable): A tensor with rank at least 2. The input value of smooth
L1 loss op with shape [batch_size, dim1, ..., dimN].
A LoDTensor or Tensor with type float32.
y (Variable): A tensor with rank at least 2. The target value of smooth
L1 loss op with same shape as :attr:`x`.
A LoDTensor or Tensor with type float32.
inside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have same shape with :attr:`x`. If
provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
by this tensor element by element.
A Tensor with type float32.
outside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have same shape with :attr:`x`. If
provided, the out smooth L1 loss will be multiplied by this tensor
element by element.
A Tensor with type float32.
sigma (float|None): Hyper parameter of smooth L1 loss layer. A float
scalar with default value 1.0.
Returns:
Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
result = fluid.layers.smooth_l1(data,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,3).astype("float32")
y = np.random.rand(3,3).astype("float32")
output= exe.run(feed={"x":x, "y":y},
fetch_list=[result])
print(output)
#[array([[0.08220536],
# [0.36652038],
# [0.20541131]], dtype=float32)]
"""
helper = LayerHelper('smooth_l1_loss', **locals())
diff = helper.create_variable_for_type_inference(dtype=x.dtype)
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='smooth_l1_loss',
inputs={
'X': x,
'Y': y,
'InsideWeight': inside_weight,
'OutsideWeight': outside_weight
},
outputs={'Diff': diff,
'Out': loss},
attrs={'sigma': sigma if sigma is not None else 1.0})
return loss
def one_hot(input, depth, allow_out_of_range=False):
"""
**WARNING:** This OP requires the last dimension of the Tensor shape to be equal to 1.
This OP will be deprecated in a future release. It is recommended to use :ref:`api_fluid_one_hot` instead.
The operator converts each id in the input to a one-hot vector with a
:attr:`depth` length. The value in the vector dimension corresponding to the id
is 1, and the value in the remaining dimension is 0.
The shape of output Tensor or LoDTensor is generated by adding :attr:`depth` dimension
behind the last dimension of the input shape.
.. code-block:: text
Example 1 (allow_out_of_range=False):
input:
X.shape = [4, 1]
X.data = [[1], [1], [3], [0]]
depth = 4
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.]]
Example 2 (allow_out_of_range=True):
input:
X.shape = [4, 1]
X.data = [[1], [1], [5], [0]]
depth = 4
allow_out_of_range = True
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.], # This id is 5, which is out of the range [0, depth), so it is set to all-zeros data.
[1., 0., 0., 0.]]
Example 3 (allow_out_of_range=False):
input:
X.shape = [4, 1]
X.data = [[1], [1], [5], [0]]
depth = 4
allow_out_of_range = False
output: An exception for Illegal value is thrown.
The input contains the id 5, which is not less than depth (4).
allow_out_of_range=False means the id is not allowed to exceed depth,
so an exception is thrown.
Args:
input(Variable): Tensor or LoDTensor with shape :math:`[N_1, N_2, ..., N_k, 1]` ,
which contains at least one dimension and the last dimension must be 1.
The data type is int32 or int64.
depth(scalar): An integer defining the :attr:`depth` of the one hot dimension. If input
is word id, depth is generally the dictionary size.
allow_out_of_range(bool): A bool value indicating whether the input
indices could be out of range :math:`[0, depth)` . When input indices are
out of range, an :code:`Illegal value` exception is raised if :attr:`allow_out_of_range`
is False, or a zero-filled representation is created if it is set to True.
Default: False.
Returns:
Variable: The one-hot representations of input. A Tensor or LoDTensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# Correspond to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4].
label = fluid.data(name="label", shape=[4, 1], dtype="int64")
one_hot_label = fluid.layers.one_hot(input=label, depth=4)
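Example 1 above can be reproduced with plain numpy. A minimal illustrative
sketch, assuming in-range ids and allow_out_of_range=False:
.. code-block:: python
import numpy as np
ids = np.array([[1], [1], [3], [0]])  # X.data from Example 1, shape [4, 1]
depth = 4
out = (ids == np.arange(depth)).astype('float32')  # broadcasts to shape [4, 4]
# out matches Out.data in Example 1.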
"""
if in_dygraph_mode():
if isinstance(depth, Variable):
depth = depth.numpy()
assert depth.shape == (
1, ), "depth of type Variable should have shape [1]"
depth = depth[0]
out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
allow_out_of_range)
out.stop_gradient = True
return out
helper = LayerHelper("one_hot", **locals())
one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
if not isinstance(depth, Variable):
# user attribute
inputs = {'X': input}
attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range}
else:
depth.stop_gradient = True
inputs = {'X': input, 'depth_tensor': depth}
attrs = {'allow_out_of_range': allow_out_of_range}
helper.append_op(
type="one_hot",
inputs=inputs,
attrs=attrs,
outputs={'Out': one_hot_out})
one_hot_out.stop_gradient = True
return one_hot_out
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
"""
Create an auto-increasing variable, which is automatically increased
by :attr:`step` in every iteration. By default, the first return value of this counter is 1,
and the step size is 1.
Args:
counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
begin(int, optional): The first return value of this counter. Default 1.
step(int, optional): The step size. Default 1.
Returns:
Variable: The auto-increased Variable with data type int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
global_step = fluid.layers.autoincreased_step_counter(
counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
"""
helper = LayerHelper('global_step_counter')
if counter_name is None:
counter_name = '@STEP_COUNTER@'
counter, is_new_var = helper.create_or_get_global_variable(
name=counter_name,
dtype='int64',
shape=[1],
persistable=True,
belong_to_optimizer=True)
if is_new_var:
helper.set_variable_initializer(
counter, initializer=Constant(
value=begin - 1, force_cpu=True))
helper.main_program.global_block()._prepend_op(
type='increment',
inputs={'X': [counter]},
outputs={'Out': [counter]},
attrs={'step': float(step)})
counter.stop_gradient = True
return counter
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
"""
This operator changes the shape of ``x`` without changing its data.
The target shape can be given by ``shape`` or ``actual_shape``.
When ``shape`` and ``actual_shape`` are set at the same time,
``actual_shape`` has a higher priority than ``shape``
but in that case ``shape`` can only be a list or tuple of integers, and ``shape`` should still be set correctly to
guarantee shape inference at compile time.
Some tricks exist when specifying the target shape.
1. -1 means the value of this dimension is inferred from the total element
number of x and remaining dimensions. Thus one and only one dimension can
be set -1.
2. 0 means the actual dimension value is going to be copied from the
corresponding dimension of x. The index of 0s in shape can not exceed
the dimension of x.
Here are some examples to explain it.
1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [6, 8], the reshape operator will transform x into a 2-D tensor with
shape [6, 8] and leaving x's data unchanged.
2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
specified is [2, 3, -1, 2], the reshape operator will transform x into a
4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
case, one dimension of the target shape is set to -1, the value of this
dimension is inferred from the total element number of x and remaining
dimensions.
3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,
besides -1, 0 means the actual dimension value is going to be copied from
the corresponding dimension of x.
**Note**:
The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape.
Args:
x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
shape(list|tuple|Variable): Define the target shape. At most one dimension of the target shape can be -1.
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
If ``shape`` is a Variable, it should be a 1-D Tensor .
actual_shape(variable, optional): A 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
according to this given shape rather than ``shape`` specifying shape.
That is to say ``actual_shape`` has a higher priority
than ``shape(list|tuple)`` but not ``shape(Variable)``. \
This argument ``actual_shape`` will be removed in a future version. \
Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
are the same variable. Otherwise, the input and output of
``layers.reshape`` are different variables. Default False. Note that if ``x``
is the input of more than one OP, ``inplace`` must be False.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
Raises:
TypeError: If actual_shape is neither Variable nor None.
ValueError: If more than one elements of ``shape`` is -1.
ValueError: If an element of ``shape`` is 0 and its index is not less than the number of dimensions of ``x``.
ValueError: If any element in ``shape`` is negative other than -1.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
data_1 = fluid.data(
name='data_1', shape=[2, 4, 6], dtype='float32')
reshaped_1 = fluid.layers.reshape(
x=data_1, shape=[-1, 0, 3, 2], inplace=True)
# the shape of reshaped_1 is [2,4,3,2].
# example 2:
# attr shape is a list which contains tensor Variable.
data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
dim = fluid.layers.fill_constant([1], "int32", 5)
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
# the shape of reshaped_2 is [5,10].
# example 3:
data_3 = fluid.data(
name="data_3", shape=[2,4,6], dtype='float32')
reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
# the shape of reshaped_3 is [6,8].
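The -1 / 0 tricks above can be reproduced by a small helper that infers the
final shape. A minimal illustrative sketch (``infer_shape`` is a hypothetical
helper for explanation only, not part of this API):
.. code-block:: python
import numpy as np
def infer_shape(x_shape, target):
    # 0 copies the corresponding dimension of x; -1 is inferred from the rest.
    shape = [x_shape[i] if d == 0 else d for i, d in enumerate(target)]
    if -1 in shape:
        known = int(np.prod([d for d in shape if d != -1]))
        shape[shape.index(-1)] = int(np.prod(x_shape)) // known
    return shape
# infer_shape([2, 4, 6], [-1, 0, 3, 2]) == [2, 4, 3, 2], as in example 1 above.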
"""
if in_dygraph_mode():
#TODO(zhiqiu): enable inplace in dygraph mode.
if inplace:
warnings.warn(
"Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
)
attrs = {}
if isinstance(shape, (list, tuple)):
if utils._contain_var(shape):
raise TypeError(
"The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
attrs['shape'] = shape
else:
raise TypeError(
"The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
out, _ = core.ops.reshape2(x, 'shape', shape)
return dygraph_utils._append_activation_in_dygraph(out, act)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
helper = LayerHelper("reshape2", **locals())
def get_new_shape_tensor(list_shape):
new_shape_tensor = []
for dim in list_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_shape_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one dimension value of 'shape' in reshape can "
"be -1. But received shape[%d] is also -1." % dim_idx)
unk_dim_idx = dim_idx
elif dim_size == 0:
assert dim_idx < len(x.shape), (
"The index of 0 in `shape` must be less than "
"the input tensor X's dimensions. "
"But received shape[%d] = 0, X's dimensions = %d." %
(dim_idx, len(x.shape)))
else:
assert dim_size > 0, (
"Each dimension value of 'shape' in reshape must not "
"be negative except one unknown dimension. "
"But received shape[%d] = %s." %
(dim_idx, str(dim_size)))
return attrs_shape
inputs = {"X": x}
attrs = {}
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape))
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensor'] = get_new_shape_tensor(shape)
elif isinstance(actual_shape, Variable):
actual_shape.stop_gradient = True
inputs["Shape"] = actual_shape
out = x if inplace else helper.create_variable_for_type_inference(
dtype=x.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="reshape2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return helper.append_activation(out)
def squeeze(input, axes, name=None):
"""
This OP will squeeze single-dimensional entries of the input tensor's shape. If axes is provided, it will
remove the dims specified by axes, and each of those dims must be one. If axes is not provided, all dims equal
to one will be deleted.
.. code-block:: text
Case1:
Input:
X.shape = (1, 3, 1, 5)
axes = [0]
Output:
Out.shape = (3, 1, 5)
Case2:
Input:
X.shape = (1, 3, 1, 5)
axes = []
Output:
Out.shape = (3, 5)
Case3:
Input:
X.shape = [1,3,1,5]
axes = [-2]
Output:
Out.shape = [1,3,5]
Args:
input (Variable): The input Tensor. Support data type: float32, float64, int8, int32, int64.
axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
Axes range is :math:`[-rank(input), rank(input))`.
If axes is negative, :math:`axes=axes+rank(input)`.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Variable: Output squeezed Tensor. Data type is same as input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x = fluid.data(name='x', shape=[None, 5, 1, 10])
y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
"""
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int8', 'int32', 'int64'],
'squeeze')
check_type(axes, 'axes', list, 'squeeze')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="squeeze2",
inputs={"X": input},
attrs={"axes": axes},
outputs={"Out": out,
"XShape": x_shape})
return out
def unsqueeze(input, axes, name=None):
"""
Insert single-dimensional entries to the shape of a Tensor. Takes one
required argument axes, a list of dimensions that will be inserted.
Dimension indices in axes are as seen in the output tensor.
For example:
.. code-block:: text
Given a tensor such that tensor with shape [3, 4, 5],
then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
Args:
input (Variable): The input Tensor to be unsqueezed. It is a N-D Tensor of data types float32, float64, int32.
axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor .
name (str|None): Name for this layer.
Returns:
Variable: Output unsqueezed Tensor, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[5, 10])
y = fluid.layers.unsqueeze(input=x, axes=[1])
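The shape rule in the text example can be checked with numpy. A minimal
illustrative sketch (indices in ``axes`` refer to positions in the output):
.. code-block:: python
import numpy as np
t = np.zeros((3, 4, 5))
for ax in sorted([0, 4]):
    t = np.expand_dims(t, ax)
# t.shape == (1, 3, 4, 5, 1), matching the text example above.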
"""
if not isinstance(axes, (int, list, tuple, Variable)):
raise TypeError(
"The type of 'axes' in unsqueeze must be int, list, tuple or Variable, but "
"received %s." % (type(axes)))
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
def _to_Variable_list(one_list):
Variable_list = []
for ele in one_list:
if isinstance(ele, Variable):
ele.stop_gradient = True
Variable_list.append(ele)
else:
assert (isinstance(ele, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', ele, force_cpu=True, out=temp_out)
Variable_list.append(temp_out)
return Variable_list
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes):
inputs["AxesTensorList"] = _to_Variable_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return out
def lod_reset(x, y=None, target_lod=None):
"""
Set LoD of :attr:`x` to a new one specified by :attr:`y` or
:attr:`target_lod`. When :attr:`y` provided, :attr:`y.lod` would be
considered as target LoD first, otherwise :attr:`y.data` would be
considered as target LoD. If :attr:`y` is not provided, target LoD should
be specified by :attr:`target_lod`. If target LoD is specified by
:attr:`y.data` or :attr:`target_lod`, only one level LoD is supported.
.. code-block:: text
* Example 1:
Given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
target_lod: [4, 2]
then we get a 1-level LoDTensor:
out.lod = [[4, 2]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 2:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a Tensor:
y.data = [[2, 4]]
y.dims = [1, 2]
then we get a 1-level LoDTensor:
out.lod = [[2, 4]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 3:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a 2-level LoDTensor:
y.lod = [[2, 2], [2, 2, 1, 1]]
y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
y.dims = [6, 1]
then we get a 2-level LoDTensor:
out.lod = [[2, 2], [2, 2, 1, 1]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
Args:
x (Variable): Input variable which could be a Tensor or LoDTensor.
y (Variable|None): If provided, output's LoD would be derived
from :attr:`y`.
target_lod (list|tuple|None): One level LoD which should be considered
as target LoD when :attr:`y` is not provided.
Returns:
Variable: Output variable with LoD specified by this layer.
Raises:
ValueError: If :attr:`y` and :attr:`target_lod` are both None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
out = fluid.layers.lod_reset(x=x, y=y)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'lod_reset')
helper = LayerHelper("lod_reset", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if y is not None:
if y.lod_level > 0:
check_variable_and_dtype(
y, 'y', ['float32', 'float64', 'int32', 'int64'], 'lod_reset')
else:
check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'lod_reset')
helper.append_op(
type="lod_reset", inputs={'X': x,
'Y': y}, outputs={'Out': out})
elif target_lod is not None:
helper.append_op(
type="lod_reset",
inputs={'X': x},
attrs={'target_lod': target_lod},
outputs={'Out': out})
else:
raise ValueError("y and target_lod should not be both none.")
return out
def lod_append(x, level):
"""
Append level to LoD of :attr:`x`.
.. code-block:: text
* Example 1:
given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
level: [1, 1, 1, 1, 1, 1, 1]
then we get a 2-level LoDTensor:
x.lod = [[ 2, 3, 1 ], [1, 1, 1, 1, 1, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
Args:
x (Variable): Input variable which could be a tensor or LoDTensor.
level (list|tuple|Variable): The LoD level to be appended into LoD of x.
Returns:
Variable: Output variable with new LoD level.
Raises:
ValueError: If :attr:`x` is None or :attr:`level` is not Iterable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1)
out = fluid.layers.lod_append(x, [1,1,1,1,1,1])
"""
from collections import Iterable
if x is None:
raise ValueError("Input(x) can't be None.")
if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
raise ValueError("Input(level) must be list, tuple or Variable.")
helper = LayerHelper("lod_append", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
inputs = {'X': x}
attrs = {'append': True}
if isinstance(level, Variable):
inputs['Y'] = level
else:
attrs['target_lod'] = level
helper.append_op(
type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
data_format='NCHW'):
"""
This operator implements the Local Response Normalization Layer.
This layer performs a type of "lateral inhibition" by normalizing over local input regions.
For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_
The formula is as follows:
.. math::
Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta}
In the above equation:
- :math:`n` : The number of channels to sum over.
- :math:`k` : The offset (avoid being divided by 0).
- :math:`\\alpha` : The scaling parameter.
- :math:`\\beta` : The exponent parameter.
Args:
input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W] or [N, H, W, C],
where N is the batch size, C is the input channel, H is height, W is width. The data
type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError.
n (int, optional): The number of channels to sum over. Default: 5
k (float, optional): An offset, positive. Default: 1.0
alpha (float, optional): The scaling parameter, positive. Default:1e-4
beta (float, optional): The exponent, positive. Default:0.75
name (str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: A tensor variable storing the transformation result with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(
name="data", shape=[None, 3, 112, 112], dtype="float32")
lrn = fluid.layers.lrn(input=data)
print(lrn.shape) # [-1, 3, 112, 112]
print(lrn.dtype) # float32
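The normalization formula above can be written out with numpy over the channel
dimension. A minimal illustrative sketch for NCHW data that mirrors the formula
literally (``x_np`` is a placeholder array, not part of this API):
.. code-block:: python
import numpy as np
x_np = np.random.rand(1, 3, 4, 4).astype('float32')  # [N, C, H, W]
n, k, alpha, beta = 5, 1.0, 1e-4, 0.75
C = x_np.shape[1]
out = np.empty_like(x_np)
for i in range(C):
    lo, hi = max(0, i - n // 2), min(C - 1, i + n // 2)
    sq_sum = (x_np[:, lo:hi + 1] ** 2).sum(axis=1)
    out[:, i] = x_np[:, i] / (k + alpha * sq_sum) ** beta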
"""
helper = LayerHelper('lrn', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
dims = len(input_shape)
if dims != 4:
raise ValueError(
"Input's dimension size of Op(lrn) must be 4, but received %d." %
(dims))
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(lrn) got wrong value: received " +
data_format + " but only NCHW or NHWC supported.")
mid_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
lrn_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="lrn",
inputs={"X": input},
outputs={
"Out": lrn_out,
"MidOut": mid_out,
},
attrs={
"n": n,
"k": k,
"alpha": alpha,
"beta": beta,
"data_format": data_format
})
return lrn_out
def pad(x, paddings, pad_value=0., name=None):
"""
This op will pad a tensor with a constant value given by :attr:`pad_value`, and the
padded shape is specified by :attr:`paddings`.
Specifically, the number of values padded before the elements of :attr:`x`
in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the number
of values padded after the elements of :attr:`x` in dimension :attr:`i` is
indicated by :attr:`paddings[2*i+1]`.
See below for an example.
.. code-block:: text
Given:
x = [[1, 2], [3, 4]]
paddings = [0, 1, 1, 2]
pad_value = 0
Return:
out = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]]
Args:
x (Variable): Tensor, data type is float32.
paddings (list): A list of integers. Its elements specify the padded
width before and after each dimension in turn.
The length of :attr:`paddings` must be equal to
:math:`rank(x) \\times 2`.
pad_value (float): The constant value used to pad.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The padded tensor, with the same data type and rank as :attr:`x`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 2 tensor variable
import paddle.fluid as fluid
x = fluid.data(name='data', shape=[300, 300], dtype='float32')
out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
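The ``paddings`` layout described above (before/after widths for each
dimension in turn) maps directly onto ``numpy.pad``. A minimal illustrative
sketch reproducing the text example:
.. code-block:: python
import numpy as np
x_np = np.array([[1, 2], [3, 4]])
paddings = [0, 1, 1, 2]  # [before_dim0, after_dim0, before_dim1, after_dim1]
pad_width = [(paddings[2 * i], paddings[2 * i + 1]) for i in range(x_np.ndim)]
out = np.pad(x_np, pad_width, mode='constant', constant_values=0)
# out == [[0, 1, 2, 0, 0],
#         [0, 3, 4, 0, 0],
#         [0, 0, 0, 0, 0]]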
"""
helper = LayerHelper('pad', input=x, **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad',
inputs={'X': x},
outputs={'Out': out},
attrs={'paddings': paddings,
'pad_value': float(pad_value)})
return out
def pad_constant_like(x, y, pad_value=0., name=None):
"""
Pad :attr:`y` with :attr:`pad_value`, the number of values padded to
the edges of each axis is specified by the difference of the shape
of :attr:`x` and :attr:`y` . ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n))
specify padding widths for each axis. The input should be a k-D tensor (k > 0 and k < 7).
See below for an example.
.. code-block:: text
Given:
X = [[[[ 0, 1, 2],
[ 3, 4, 5]],
[[ 6, 7, 8],
[ 9, 10, 11]],
[[12, 13, 14],
[15, 16, 17]]],
[[[18, 19, 20],
[21, 22, 23]],
[[24, 25, 26],
[27, 28, 29]],
[[30, 31, 32],
[33, 34, 35]]]]
X.shape = (2, 3, 2, 3)
Y = [[[[35, 36, 37]],
[[38, 39, 40]],
[[41, 42, 43]]]]
Y.shape = (1, 3, 1, 3)
And
pad_value = 0.
Return:
Out = [[[[35, 36, 37],
[ 0, 0, 0]],
[[38, 39, 40],
[ 0, 0, 0]],
[[41, 42, 43],
[ 0, 0, 0]]],
[[[ 0, 0, 0],
[ 0, 0, 0]],
[[ 0, 0, 0],
[ 0, 0, 0]],
[[ 0, 0, 0],
[ 0, 0, 0]]]]
Out.shape = [2, 3, 2, 3]
Args:
x (Variable): Tensor, its shape specifies the shape of output.
y (Variable): Tensor, its rank is the same with :attr:`x`, and for each dimension :math:`i` ,
:math:`y\_shape[i] <= x\_shape[i]` . The data type can be float32 or float64.
pad_value (float): The constant value used to pad.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The padded tensor, with the same shape as :attr:`x` and the same data type as :attr:`y`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3)
# y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3)
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32')
y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
# out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3]
"""
helper = LayerHelper('pad_constant_like', input=x, **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad_constant_like',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'pad_value': float(pad_value)})
return out
def label_smooth(label,
prior_dist=None,
epsilon=0.1,
dtype="float32",
name=None):
"""
Label smoothing is a mechanism to regularize the classifier layer and is called
label-smoothing regularization (LSR).
Label smoothing is proposed to encourage the model to be less confident,
since optimizing the log-likelihood of the correct label directly may
cause overfitting and reduce the ability of the model to adapt. Label
smoothing replaces the ground-truth label :math:`y` with the weighted sum
of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
i.e.
.. math::
\\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k,
where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
respectively, and :math:`\\tilde{y}_k` is the smoothed label. Usually
a uniform distribution is used for :math:`\mu`.
See more details about label smoothing in https://arxiv.org/abs/1512.00567.
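As a rough illustration of the formula above, a plain NumPy sketch (illustrative only, not part of this API; the sample values are assumptions):
.. code-block:: python
    import numpy as np
    y = np.array([[1., 0., 0.], [0., 1., 0.]])   # one-hot labels: 2 samples, 3 classes
    epsilon = 0.1
    mu = np.full((1, 3), 1.0 / 3)                # uniform prior distribution
    y_smooth = (1 - epsilon) * y + epsilon * mu
    # y_smooth is approximately [[0.9333, 0.0333, 0.0333], [0.0333, 0.9333, 0.0333]]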
Parameters:
label(Variable): The input variable containing the label data. The
label data should use one-hot representation. It's
a multidimensional tensor with a shape of
:math:`[N_1, ..., Depth]`, where Depth is class number.
prior_dist(Variable, optional): The prior distribution to be used to smooth
labels. If not provided, a uniform distribution
is used. It's a multidimensional tensor with a shape of
:math:`[1, class\_num]` . The default value is None.
epsilon(float, optional): The weight used to mix up the original ground-truth
distribution and the fixed distribution. The default value is
0.1.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set
as 'float32', 'float64'. The default value is 'float32'.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to
:ref:`api_guide_Name`.
Returns:
Variable: The tensor variable containing the smoothed labels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
label = layers.data(name="label", shape=[1], dtype="float32")
one_hot_label = layers.one_hot(input=label, depth=10)
smooth_label = layers.label_smooth(
label=one_hot_label, epsilon=0.1, dtype="float32")
"""
if epsilon > 1. or epsilon < 0.:
raise ValueError("The value of epsilon must be between 0 and 1.")
if in_dygraph_mode():
return core.ops.label_smooth(label, prior_dist, 'epsilon',
float(epsilon))
helper = LayerHelper("label_smooth", **locals())
label.stop_gradient = True
smooth_label = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="label_smooth",
inputs={"X": label,
"PriorDist": prior_dist} if prior_dist else {"X": label},
outputs={"Out": smooth_label},
attrs={"epsilon": float(epsilon)})
return smooth_label
@templatedoc()
def roi_pool(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
rois_lod=None):
"""
This operator implements the roi_pooling layer.
Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
The operator has three steps:
1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height;
2. Finding the largest value in each section;
3. Copying these max values to the output buffer.
For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
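The three steps can be sketched with plain NumPy for a single RoI and a 1x1 pooled size (illustrative only; the values mirror the example below):
.. code-block:: python
    import numpy as np
    feat = np.arange(1, 17, dtype=np.float32).reshape(4, 4)  # toy single-channel feature map
    x1, y1, x2, y2 = 1, 1, 2, 2                               # one RoI in input coordinates
    section = feat[y1:y2 + 1, x1:x2 + 1]                      # step 1: the (single) pooled section
    pooled = section.max()                                    # steps 2-3: take and copy the max
    # pooled == 11.0, matching the first RoI in the example below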
Args:
input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is the height, W is the width. The data type is float32 or float64.
rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.
rois_lod (Variable): The lod info of rois. Default: None
pooled_height (int, optional): The pooled output height, data type is int32. Default: 1
pooled_width (int, optional): The pooled output width, data type is int32. Default: 1
spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
Returns:
Variable: The pooled feature, 4D-Tensor with the shape of [num_rois, C, pooled_height, pooled_width].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
DATATYPE='float32'
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE)
roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place)
rois_lod_data = np.array([0, 2])
x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE)
rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE)
rois_lod = fluid.data(name='rois_lod', shape=[None], dtype='int64')
pool_out = fluid.layers.roi_pool(
input=x,
rois=rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
rois_lod=rois_lod)
exe = fluid.Executor(place)
out, = exe.run(feed={'input':input_data ,'roi':roi_data, 'rois_lod': rois_lod_data}, fetch_list=[pool_out.name])
print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32)
print(np.array(out).shape) # (2, 1, 1, 1)
"""
helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
argmaxes = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="roi_pool",
inputs={"X": input,
"ROIs": rois,
"RoisLod": rois_lod},
outputs={"Out": pool_out,
"Argmax": argmaxes},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
})
return pool_out
@templatedoc()
def roi_align(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
sampling_ratio=-1,
name=None,
rois_lod=None):
"""
${comment}
Args:
input (Variable): ${x_comment}
rois (Variable): ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level is 1. The
data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
(x1, y1) is the top left coordinates, and (x2, y2) is the bottom
right coordinates.
rois_lod (Variable): The lod info of rois. Default: None
pooled_height (int32, optional): ${pooled_height_comment} Default: 1
pooled_width (int32, optional): ${pooled_width_comment} Default: 1
spatial_scale (float32, optional): ${spatial_scale_comment} Default: 1.0
sampling_ratio(int32, optional): ${sampling_ratio_comment} Default: -1
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set and
is None by default.
Returns:
Variable:
Output: ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='data', shape=[None, 256, 32, 32], dtype='float32')
rois = fluid.data(
name='rois', shape=[None, 4], dtype='float32')
rois_lod = fluid.data(name='rois_lod', shape=[None], dtype='int64')
align_out = fluid.layers.roi_align(input=x,
rois=rois,
pooled_height=7,
pooled_width=7,
spatial_scale=0.5,
sampling_ratio=-1,
rois_lod=rois_lod)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'roi_align')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align')
helper = LayerHelper('roi_align', **locals())
dtype = helper.input_dtype()
align_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_align",
inputs={"X": input,
"ROIs": rois,
"RoisLod": rois_lod},
outputs={"Out": align_out},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale,
"sampling_ratio": sampling_ratio
})
return align_out
def dice_loss(input, label, epsilon=0.00001, name=None):
"""
Dice loss for comparing the similarity between the input predictions and the label.
This implementation is for binary classification, where the input is sigmoid
predictions of each pixel, usually used for segmentation task. The dice loss can
be defined as the following equation:
.. math::
dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\
&= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\
&= \\frac{(union\_area - intersection\_area)}{total\_area}
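A minimal NumPy sketch of the equation above, assuming the label is already one-hot encoded (illustrative only; the sample values are assumptions):
.. code-block:: python
    import numpy as np
    pred = np.array([[0.9, 0.05, 0.05], [0.2, 0.7, 0.1]])   # sigmoid-like predictions
    label = np.array([[1., 0., 0.], [0., 1., 0.]])          # one-hot labels
    eps = 1e-5
    intersection = (pred * label).sum(axis=1)
    total = pred.sum(axis=1) + label.sum(axis=1)
    loss = (1 - 2 * intersection / (total + eps)).mean()    # approximately 0.2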
Parameters:
input (Variable): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_D]`, where :math:`N_1` is
the batch_size, :math:`N_D` is 1. It is usually the output predictions of sigmoid activation.
The data type can be float32 or float64.
label (Variable): Tensor, the ground truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_D]`.
where :math:`N_1` is the batch_size, :math:`N_D` is 1. The data type can be float32 or float64.
epsilon (float): The epsilon will be added to the denominator to avoid
division by zero when both input and label are empty.
Default: 0.00001
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The dice loss with shape [1], data type is the same as `input` .
Return Type:
Variable
Example:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='data', shape = [3, 224, 224, 1], dtype='float32')
label = fluid.data(name='label', shape=[3, 224, 224, 1], dtype='float32')
predictions = fluid.layers.sigmoid(x)
loss = fluid.layers.dice_loss(input=predictions, label=label)
"""
label = one_hot(label, depth=input.shape[-1])
reduce_dim = list(range(1, len(input.shape)))
inse = reduce_sum(input * label, dim=reduce_dim)
dice_denominator = reduce_sum(
input, dim=reduce_dim) + reduce_sum(
label, dim=reduce_dim)
dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
return reduce_mean(dice_score)
def image_resize(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies to the three dimensions (depth, height and width).
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future and only use :attr:`out_shape` instead.
Supporting resample methods:
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension (the height direction) and the 4th dimension (the width
direction) of the input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
align_corners and align_mode are optional parameters; they select the
calculation method of the interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
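The scale_factor rule above can be sketched as follows (a hypothetical helper `_scale_factor`, illustrative only and not part of this API):
.. code-block:: python
    def _scale_factor(in_size, out_size, align_corners):
        # mirrors the "For scale" rule above
        if align_corners and out_size > 1:
            return (in_size - 1.0) / (out_size - 1.0)
        return float(in_size) / float(out_size)
    # e.g. resizing a height of 6 to 12 gives 5/11 with align_corners=True
    # and 0.5 with align_corners=False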
Parameters:
input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of image resize
layer, the shape is (out_h, out_w) when input is a 4-D Tensor and is
(out_d, out_h, out_w) when input is a 5-D Tensor. Default: None. If
a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
resample(str): The resample method. It supports 'BILINEAR', 'TRILINEAR'
and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int) : An optional flag for bilinear interpolation. It can be \'0\'
for src_idx = scale*(dst_index+0.5)-0.5 , or \'1\' for
src_idx = scale*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'BILINEAR',
'TRILINEAR' or 'NEAREST' currently.
ValueError: 'BILINEAR' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only supports 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.image_resize(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.image_resize(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.image_resize(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.image_resize(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.image_resize(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
}
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'BILINEAR', 'TRILINEAR' "
"or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample in ['BILINEAR', 'NEAREST'] and len(input.shape) != 4:
raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_turple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not (_is_list_or_turple_(out_shape)):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
@templatedoc(op_type="bilinear_interp")
def resize_bilinear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes the input by performing bilinear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in
the future and only use :attr:`out_shape` instead.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation
align_corners and align_mode are optional parameters; they select the
calculation method of the interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Parameters:
input(Variable): 4-D Tensor(NCHW), its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize bilinear
layer, the shape is (out_h, out_w). Default: None. If a list, each
element can be an integer or a Tensor Variable with shape: [1]. If a
Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(bool): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
align_corners, align_mode, data_format)
@templatedoc(op_type="trilinear_interp")
def resize_trilinear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCDHW'):
"""
This op resizes the input by performing trilinear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated
in the future and only use :attr:`out_shape` instead.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation
align_corners and align_mode are optional parameters; they select the
calculation method of the interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Parameters:
input(${x_type}): 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_d, out_h, out_w). Default: None. Every element should be an integer or a Tensor Variable with shape: [1] if it is a list. If it is a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input depth, height or width.
At least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(bool): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
Variable: A 5-D Tensor(NCDHW or NDHWC)
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,8,10])
#1
output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4])
#3
#x = np.array([3,12,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,8,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12, 12)
#2
# (2, 3, 12, 2, 4)
#3
# (2, 3, 3, 12, 12)
#4
# (2, 3, 3, 4, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12])
print(output.shape)
# [2L, 3L, 12L, 12L, 12L]
"""
return image_resize(input, out_shape, scale, name, 'TRILINEAR',
actual_shape, align_corners, align_mode, data_format)
@templatedoc(op_type="nearest_interp")
def resize_nearest(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
data_format='NCHW'):
"""
This op resizes the input by performing nearest neighbor interpolation in both the
height direction and the width direction based on given output shape
which is specified by actual_shape, out_shape and scale in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future and only use :attr:`out_shape` instead.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor(H_{in} * scale_{factor})
W_out = floor(W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
Parameters:
input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_h, out_w). Default: None. Every element should be an integer or a tensor Variable with shape: [1] if it is a list. If it is a tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_nearest(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_nearest(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_nearest(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return image_resize(
input,
out_shape,
scale,
name,
'NEAREST',
actual_shape,
align_corners,
align_mode=1,
data_format=data_format)
def image_resize_short(input, out_short_len, resample='BILINEAR'):
"""
This op resizes a batch of images. The short edge of input images will be
resized to the given 'out_short_len'. The long edge of input images
will be resized proportionately to make images' length-width ratio
constant.
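For example, the target shape is derived roughly as follows (plain Python, illustrative only; the numbers match the example below):
.. code-block:: python
    in_h, in_w = 6, 9
    out_short_len = 3
    short, long_side = (in_h, in_w) if in_h <= in_w else (in_w, in_h)
    scale = float(out_short_len) / float(short)
    out_long = int(long_side * scale + 0.5)   # rounded to the nearest integer
    # target spatial shape is (3, 5): 9 * 0.5 + 0.5 -> 5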
Parameters:
input (Variable): 4-D tensor(NCHW), The input tensor of image resize layer.
out_short_len(int): The length of output images' short edge.
resample (str): resample method, default: BILINEAR.
Returns:
Variable: 4-D tensor(NCHW).
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
out = fluid.layers.image_resize_short(input, out_short_len=3)
"""
in_shape = input.shape
if len(in_shape) != 4:
raise ValueError(
"The rank of input must be 4 (num_batches, channels, in_h, in_w).")
hw = in_shape[2:4]
short_idx = hw.index(min(hw))
long_idx = 1 - short_idx
out_shape = list(hw)
out_shape[short_idx] = out_short_len
out_shape[long_idx] = int(
float(out_shape[long_idx]) * (float(out_short_len) / float(hw[
short_idx])) + 0.5)
return image_resize(input=input, out_shape=out_shape, resample=resample)
def gather(input, index, overwrite=True):
"""
**Gather Layer**
Output is obtained by gathering entries of the outer-most dimension
of X indexed by `index` and concatenating them together.
.. math::
Out = X[Index]
.. code-block:: text
Given:
X = [[1, 2],
[3, 4],
[5, 6]]
Index = [1, 2]
Then:
Out = [[3, 4],
[5, 6]]
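The same result can be reproduced with plain NumPy fancy indexing (illustrative only):
.. code-block:: python
    import numpy as np
    X = np.array([[1, 2], [3, 4], [5, 6]])
    index = np.array([1, 2])
    out = X[index]   # gathers rows 1 and 2 -> [[3, 4], [5, 6]]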
Args:
input (Variable): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
float16 (only for GPU).
index (Variable): The index input tensor with rank=1. Data type is int32 or int64.
overwrite (bool, optional): The mode that updating the grad when has same index.
If True, use the overwrite mode to update the grad of the same index,
if False, use the accumulate mode to update the grad of the same index.
Default value is True.
Returns:
output (Variable): The output is a tensor with the same rank as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
output = fluid.layers.gather(x, index)
"""
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather",
inputs={"X": input,
"Index": index},
outputs={"Out": out},
attrs={'overwrite': overwrite})
return out
def gather_nd(input, index, name=None):
"""
**Gather Nd Layer**
This function is actually a high-dimensional extension of :code:`gather`
and supports simultaneous indexing by multiple axes. :attr:`index` is a
K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
tensor of indices into :attr:`input`, where each element defines
a slice of :attr:`input`:
.. math::
output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]
Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .
.. code-block:: text
Given:
input = [[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]
input.shape = (2, 3, 4)
* Case 1:
index = [[1]]
gather_nd(input, index)
= [input[1, :, :]]
= [[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]
* Case 2:
index = [[0,2]]
gather_nd(input, index)
= [input[0, 2, :]]
= [8, 9, 10, 11]
* Case 3:
index = [[1, 2, 3]]
gather_nd(input, index)
= [input[1, 2, 3]]
= [23]
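Case 2 above can be reproduced with plain NumPy (illustrative only):
.. code-block:: python
    import numpy as np
    data = np.arange(24).reshape(2, 3, 4)            # same values as `input` above
    index = np.array([[0, 2]])
    out = np.stack([data[tuple(idx)] for idx in index])
    # out -> [[ 8,  9, 10, 11]], shape (1, 4) == index.shape[:-1] + data.shape[2:]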
Args:
input (Variable): The source input. Its dtype should be int32, int64, float32, float64.
index (Variable): The index input with rank > 1, index.shape[-1] <= input.rank.
Its dtype should be int32, int64.
name (str|None): A name for this layer(optional). If set None, the
layer will be named automatically.
Returns:
output (Variable): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
index = fluid.data(name='index', shape=[2, 2], dtype='int32')
output = fluid.layers.gather_nd(x, index)
"""
helper = LayerHelper('gather_nd', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather_nd",
inputs={"X": input,
"Index": index},
outputs={"Out": output})
return output
def scatter(input, index, updates, name=None, overwrite=True):
"""
**Scatter Layer**
Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python
import numpy as np
#input:
input = np.array([[1, 1], [2, 2], [3, 3]])
index = np.array([2, 1, 0, 1])
# shape of updates should be the same as input
# shape of updates with dim > 1 should be the same as input
updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
overwrite = False
# calculation:
if not overwrite:
for i in range(len(index)):
input[index[i]] = np.zeros((2))
for i in range(len(index)):
if (overwrite):
input[index[i]] = updates[i]
else:
input[index[i]] += updates[i]
# output:
out = np.array([[3, 3], [6, 6], [1, 1]])
out.shape # [3, 2]
Args:
input (Variable): The input N-D Tensor with rank>=1. Data type can be float32.
index (Variable): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed the length of updates, and the values in index cannot exceed the length of input.
updates (Variable): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
overwrite (bool): The mode that updating the output when there are same indices.
If True, use the overwrite mode to update the output of the same index,
if False, use the accumulate mode to update the output of the same index.
Default value is True.
Returns:
Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
output = fluid.layers.scatter(input, index, updates, overwrite=False)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
index_data = np.array([2, 1, 0, 1]).astype(np.int64)
update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output])
print(res)
# [array([[3., 3.],
# [6., 6.],
# [1., 1.]], dtype=float32)]
"""
helper = LayerHelper('scatter', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter",
inputs={"X": input,
"Ids": index,
"Updates": updates},
attrs={'overwrite': overwrite},
outputs={"Out": out})
return out
def scatter_nd_add(ref, index, updates, name=None):
"""
**Scatter_nd_add Layer**
Output is obtained by applying sparse addition to a single value
or slice in a Variable.
:attr:`ref` is a Tensor with rank :math:`R`
and :attr:`index` is a Tensor with rank :math:`K` . Thus, :attr:`index`
has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
is a Tensor with rank :math:`K - 1 + R - Q` and its
shape is :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` .
According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
add the corresponding :attr:`updates` slice to the :attr:`ref` slice
which is obtained by the last one dimension of :attr:`index` .
.. code-block:: text
Given:
* Case 1:
ref = [0, 1, 2, 3, 4, 5]
index = [[1], [2], [3], [1]]
updates = [9, 10, 11, 12]
we get:
output = [0, 22, 12, 14, 4, 5]
* Case 2:
ref = [[65, 17], [-14, -25]]
index = [[], []]
updates = [[[-1, -2], [1, 2]],
[[3, 4], [-3, -4]]]
ref.shape = (2, 2)
index.shape = (2, 0)
updates.shape = (2, 2, 2)
we get:
output = [[67, 19], [-16, -27]]
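Case 1 above can be reproduced with plain NumPy (illustrative only):
.. code-block:: python
    import numpy as np
    ref = np.array([0., 1., 2., 3., 4., 5.])
    index = np.array([[1], [2], [3], [1]])
    updates = np.array([9., 10., 11., 12.])
    out = ref.copy()
    np.add.at(out, index[:, 0], updates)   # the repeated index 1 accumulates: 1 + 9 + 12 = 22
    # out -> [ 0., 22., 12., 14.,  4.,  5.]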
Args:
ref (Variable): The ref input. Its dtype should be float32, float64.
index (Variable): The index input with rank > 1 and index.shape[-1] <= ref.rank.
Its dtype should be int32 or int64 as it is used as indexes.
updates (Variable): The updated value of scatter_nd_add op, and it must have the same dtype
as ref. It must have the shape index.shape[:-1] + ref.shape[index.shape[-1]:].
name (str|None): The output variable name. If set None, the layer will be named automatically.
Returns:
output (Variable): The output is a tensor with the same shape and dtype as ref.
Examples:
.. code-block:: python
import paddle.fluid as fluid
ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
index = fluid.data(name='index', shape=[3, 2], dtype='int32')
updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
output = fluid.layers.scatter_nd_add(ref, index, updates)
"""
if ref.dtype != updates.dtype:
raise ValueError("ref and updates must have same data type.")
helper = LayerHelper('scatter_nd_add', **locals())
dtype = helper.input_dtype(input_param_name='ref')
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter_nd_add",
inputs={"X": ref,
"Index": index,
"Updates": updates},
outputs={"Out": output})
return output
def scatter_nd(index, updates, shape, name=None):
"""
**Scatter_nd Layer**
Output is obtained by scattering the :attr:`updates` in a new tensor according
to :attr:`index` . This op is similar to :code:`scatter_nd_add`, except the
tensor of :attr:`shape` is zero-initialized. Correspondingly, :code:`scatter_nd(index, updates, shape)`
is equal to :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)` .
If :attr:`index` has repeated elements, then the corresponding updates are accumulated.
Because of numerical approximation issues, a different order of the repeated elements
in :attr:`index` may cause different results. The specific calculation method can be
seen in :code:`scatter_nd_add` . This op is the inverse of the :code:`gather_nd` op.
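A rough NumPy sketch of that equivalence (illustrative only; the sample values are assumptions):
.. code-block:: python
    import numpy as np
    index = np.array([[1], [3], [1]])
    updates = np.array([9., 10., 11.])
    out = np.zeros(6, dtype=updates.dtype)   # the zero-initialized tensor of `shape`
    np.add.at(out, index[:, 0], updates)     # the repeated index 1 accumulates: 9 + 11 = 20
    # out -> [ 0., 20.,  0., 10.,  0.,  0.]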
Args:
index (Variable): The index input with rank > 1 and index.shape[-1] <= len(shape).
Its dtype should be int32 or int64 as it is used as indexes.
updates (Variable): The updated value of scatter_nd op. Its dtype should be float32, float64.
It must have the shape index.shape[:-1] + shape[index.shape[-1]:]
shape(tuple|list): Shape of output tensor.
name (str|None): The output variable name. If set None, the layer will be named automatically.
Returns:
output (Variable): The output is a tensor with the same type as :attr:`updates` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
index = fluid.data(name='index', shape=[3, 2], dtype='int64')
updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
output = fluid.layers.scatter_nd(index, updates, shape)
"""
return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name)
@templatedoc()
def random_crop(x, shape, seed=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
shape(${shape_type}): ${shape_comment}
seed(int|${seed_type}|None): ${seed_comment} By default, the seed will
get from `random.randint(-65536, 65535)`.
Returns:
${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
img = fluid.data("img", [None, 3, 256, 256])
# cropped_img is [-1, 3, 224, 224]
cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224])
# cropped_img2 shape: [-1, 2, 224, 224]
# cropped_img2 = fluid.layers.random_crop(img, shape=[2, 224, 224])
# cropped_img3 shape: [-1, 3, 128, 224]
# cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224])
"""
helper = LayerHelper("random_crop", **locals())
dtype = x.dtype
out = helper.create_variable_for_type_inference(dtype)
if seed is None:
seed = np.random.randint(-65536, 65536)
op_attrs = {"shape": shape}
if isinstance(seed, int):
op_attrs["startup_seed"] = seed
seed = helper.create_variable(
name=unique_name.generate("random_crop_seed"),
dtype="int64",
persistable=True)
elif not isinstance(seed, Variable):
raise ValueError("'seed' must be a Variable or an int.")
helper.append_op(
type="random_crop",
inputs={"X": x,
"Seed": seed},
outputs={"Out": out,
"SeedOut": seed},
attrs=op_attrs)
return out
def log(x, name=None):
"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
Out = \\ln(x)
Args:
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[1], dtype="float32")
res = fluid.layers.log(x)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1], [2]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[0.], [0.6931472]]
"""
if in_dygraph_mode():
return core.ops.log(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
inputs = {'X': [x]}
helper = LayerHelper('log', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
return out
@templatedoc()
def relu(x, name=None):
"""
${comment}
Args:
x(Variable): ${x_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[-1,0],[1,2.6]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu(x1)
print(out1.numpy())
# [[0. 0. ]
# [1. 2.6]]
"""
if in_dygraph_mode():
return core.ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
inputs = {'X': [x]}
helper = LayerHelper('relu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
return out
def selu(x, scale=None, alpha=None, name=None):
"""
Selu Operator.
The equation is:
.. math::
selu = \\lambda *
\\begin{cases}
x, &\\quad \\text{if } x > 0 \\\\
\\alpha * e^x - \\alpha, &\\quad \\text{if } x \\leq 0
\\end{cases}
The input `X` can carry the LoD (Level of Details) information,
or not. And the output shares the LoD information with input `X`.
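A minimal NumPy sketch of the equation above, using the default scale and alpha (illustrative only):
.. code-block:: python
    import numpy as np
    scale = 1.0507009873554805
    alpha = 1.6732632423543772
    x = np.array([-1., 0., 2.])
    out = scale * np.where(x > 0, x, alpha * np.exp(x) - alpha)
    # out is approximately [-1.1113, 0., 2.1014]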
Args:
x (Variable): The input N-D Tensor.
scale(float, optional): lambda in selu activation function,
the default value is 1.0507009873554804934193349852946.
For more information about this value, please refer
to: https://arxiv.org/abs/1706.02515.
alpha(float, optional): alpha in selu activation function,
the default value is 1.6732632423543772848170429916717.
For more information about this value, please refer
to: https://arxiv.org/abs/1706.02515.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the same shape and LoD information as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.selu(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0. , 1.050701],[2.101402, 3.152103]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')
helper = LayerHelper('selu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
attrs = {}
if scale is not None:
attrs["scale"] = scale
if alpha is not None:
attrs["alpha"] = alpha
helper.append_op(
type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs)
return out
def mean_iou(input, label, num_classes):
"""
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
.. math::
IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.
The predictions are accumulated in a confusion matrix and mean-IOU
is then calculated from it.
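The per-class computation can be sketched with plain NumPy (illustrative only, not the operator itself; the sample values are assumptions):
.. code-block:: python
    import numpy as np
    pred = np.array([0, 1, 1, 2, 2, 2])
    label = np.array([0, 1, 2, 2, 2, 1])
    num_classes = 3
    ious = []
    for c in range(num_classes):
        tp = np.sum((pred == c) & (label == c))
        fp = np.sum((pred == c) & (label != c))
        fn = np.sum((pred != c) & (label == c))
        ious.append(tp / float(tp + fp + fn))
    mean_iou_value = np.mean(ious)   # (1.0 + 1/3 + 0.5) / 3, roughly 0.611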
Parameters:
input (Variable): A n-D Tensor of prediction results for semantic labels with type int32 or int64.
label (Variable): A Tensor of ground truth labels with type int32 or int64.
Its shape should be the same as input.
num_classes (int32): The possible number of labels.
Returns:
Three Variables.
- mean_iou(Variable) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
Data type is float32.
- out_wrong(Variable) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
The wrong numbers of each class.
- out_correct(Variable): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.
Examples:
.. code-block:: python
import paddle.fluid as fluid
iou_shape = [None, 32, 32]
num_classes = 5
predict = fluid.data(name='predict', shape=iou_shape, dtype='int64')
label = fluid.data(name='label', shape=iou_shape, dtype='int64')
mean_iou, out_wrong, out_correct = fluid.layers.mean_iou(predict, label,
num_classes)
"""
helper = LayerHelper('mean_iou', **locals())
dtype = helper.input_dtype()
out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
out_wrong = helper.create_variable_for_type_inference(dtype='int32')
out_correct = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="mean_iou",
inputs={"Predictions": input,
"Labels": label},
outputs={
"OutMeanIou": out_mean_iou,
"OutWrong": out_wrong,
"OutCorrect": out_correct
},
attrs={"num_classes": num_classes})
return out_mean_iou, out_wrong, out_correct
def crop(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
**Warning:** THIS OP IS DEPRECATED. It will be removed in the future version.
Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead.
.. code-block:: text
* Case 1:
Given
X = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]],
and
shape = [2, 2],
offsets = [0, 1],
output is:
Out = [[1, 2],
[3, 4]].
* Case 2:
Given
X = [[0, 1, 2, 5, 0]
[0, 3, 4, 6, 0]
[0, 0, 0, 0, 0]],
and shape is tensor
shape = [[0, 0, 0]
[0, 0, 0]]
and
offsets = [0, 1],
output is:
Out = [[1, 2, 5],
[3, 4, 6]].
Parameters:
x (Variable): Tensor, data type can be float32 or float64.
shape (Variable|list/tuple of integers): The output shape is specified
by `shape`, which can be a Tensor or a list/tuple of integers.
If it is a Tensor, its rank must be the same as `x` , only
its shape will be used, and its value will be ignored. This way
is suitable for the case that the output shape may be changed each
iteration. If it is a list/tuple of integers, its length must be the same
as the rank of `x`
offsets (Variable|list/tuple of integers|None): Specifies the cropping
offsets at each dimension. It can be a Tensor or a list/tuple
of integers. If it is a Tensor, its rank must be the same as `x`.
This way is suitable for the case that the offsets may be changed
each iteration. If it is a list/tuple of integers, its length must be the
same as the rank of `x`. If None, the offsets are 0 at each dimension.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name` . Usually the name does not need to be set and
is None by default.
Returns:
The cropped Tensor, which has the same rank and data type with `x`
Return Type:
Variable
Raises:
ValueError: If shape is not a list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
crop = fluid.layers.crop(x, shape=y)
# or
z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32")
crop = fluid.layers.crop(z, shape=[2, 2, 3])
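A further sketch (added for illustration; the names are placeholders), mirroring Case 1 above with explicit offsets:
.. code-block:: python
import paddle.fluid as fluid
w = fluid.data(name="w", shape=[3, 5], dtype="float32")
cropped = fluid.layers.crop(w, shape=[2, 2], offsets=[0, 1])
# cropped has shape [2, 2]; prefer crop_tensor for new code, since crop is deprecated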
"""
helper = LayerHelper('crop', **locals())
if not (isinstance(shape, list) or isinstance(shape, tuple) or \
isinstance(shape, Variable)):
raise ValueError("The shape should be a list, tuple or Variable.")
if offsets is None:
offsets = [0] * len(x.shape)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
if isinstance(shape, Variable):
ipts['Y'] = shape
else:
attrs['shape'] = shape
if isinstance(offsets, Variable):
ipts['Offsets'] = offsets
else:
attrs['offsets'] = offsets
helper.append_op(
type='crop',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def crop_tensor(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
.. code-block:: text
* Case 1 (input is a 2-D Tensor):
Input:
X.shape = [3, 5]
X.data = [[0, 1, 2, 0, 0],
[0, 3, 4, 0, 0],
[0, 0, 0, 0, 0]]
Parameters:
shape = [2, 2]
offsets = [0, 1]
Output:
Out.shape = [2, 2]
Out.data = [[1, 2],
[3, 4]]
* Case 2 (input is a 3-D Tensor):
Input:
X.shape = [2, 3, 4]
X.data = [[[0, 1, 2, 3],
[0, 5, 6, 7],
[0, 0, 0, 0]],
[[0, 3, 4, 5],
[0, 6, 7, 8],
[0, 0, 0, 0]]]
Parameters:
shape = [2, 2, -1]
offsets = [0, 0, 1]
Output:
Out.shape = [2, 2, 3]
Out.data = [[[1, 2, 3],
[5, 6, 7]],
[[3, 4, 5],
[6, 7, 8]]]
Parameters:
x (Variable): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
shape (list|tuple|Variable): The output shape is specified
by `shape`. Its data type is int32. If it is a list/tuple, its length must be
the same as the dimension size of `x`. If it is a Variable, it should be a 1-D Tensor.
When it is a list, each element can be an integer or a Tensor of shape [1].
If the list contains Variables, it is suitable for the case that the shape may
change each iteration.
offsets (list|tuple|Variable, optional): Specifies the cropping
offsets at each dimension. Its data type is int32. If it is a list/tuple, its length
must be the same as the dimension size of `x`. If it is a Variable, it should be a 1-D
Tensor. When it is a list, each element can be an integer or a Tensor of shape [1].
If the list contains Variables, it is suitable for the case that the offsets may change
each iteration. Default: None, in which case the offsets are 0 at each dimension.
name(str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: The cropped Tensor has the same data type as `x`.
Raises:
TypeError: If the data type of `x` is not in: float32, float64, int32, int64.
TypeError: If `shape` is not a list, tuple or Variable.
TypeError: If the data type of `shape` is not int32.
TypeError: If `offsets` is not None and not a list, tuple or Variable.
TypeError: If the data type of `offsets` is not int32.
ValueError: If the element in `offsets` is less than zero.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32")
# x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime.
# shape is a 1-D Tensor
crop_shape = fluid.data(name="crop_shape", shape=[3], dtype="int32")
crop0 = fluid.layers.crop_tensor(x, shape=crop_shape)
# crop0.shape = [-1, -1, -1], it means crop0.shape[0] = x.shape[0] in runtime.
# or shape is a list in which each element is a constant
crop1 = fluid.layers.crop_tensor(x, shape=[-1, -1, 3], offsets=[0, 1, 0])
# crop1.shape = [-1, 2, 3]
# or shape is a list in which each element is a constant or Variable
y = fluid.data(name="y", shape=[3, 8, 8], dtype="float32")
dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
crop2 = fluid.layers.crop_tensor(y, shape=[3, dim1, 4])
# crop2.shape = [3, -1, 4]
# offsets is a 1-D Tensor
crop_offsets = fluid.data(name="crop_offsets", shape=[3], dtype="int32")
crop3 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=crop_offsets)
# crop3.shape = [-1, 2, 3]
# offsets is a list in which each element is a constant or Variable
offsets_var = fluid.data(name="dim1", shape=[1], dtype="int32")
crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var])
# crop4.shape = [-1, 2, 3]
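A further runnable sketch (added for illustration; the names are placeholders), executing Case 1 above with an executor:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
inp = fluid.data(name="inp", shape=[3, 5], dtype="float32")
cropped = fluid.layers.crop_tensor(inp, shape=[2, 2], offsets=[0, 1])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
data = np.array([[0, 1, 2, 0, 0], [0, 3, 4, 0, 0], [0, 0, 0, 0, 0]]).astype("float32")
out_np, = exe.run(feed={"inp": data}, fetch_list=[cropped])
print(out_np)  # [[1., 2.], [3., 4.]]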
"""
helper = LayerHelper('crop_tensor', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'crop_tensor')
check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
'crop_tensor')
if offsets is None:
offsets = [0] * len(x.shape)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
def _attr_shape_check(shape_val):
if not isinstance(shape_val, int):
raise TypeError(
"Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(shape_val))
if shape_val == 0:
raise ValueError(
"Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
% str(shape_val))
if shape_val < -1:
raise ValueError(
"When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
% str(shape_val))
def _attr_offsets_check(offset_val):
if not isinstance(offset_val, int):
raise TypeError(
"Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(offset_val))
if offset_val < 0:
raise ValueError(
"Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
% str(offset_val))
if isinstance(offsets, Variable):
offsets.stop_gradient = True
ipts['Offsets'] = offsets
attrs['offsets'] = [-1] * len(x.shape)
elif utils._contain_var(offsets):
new_offsets_tensor = []
offsets_attr = []
for dim in offsets:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_offsets_tensor.append(dim)
offsets_attr.append(-1)
else:
_attr_offsets_check(dim)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_offsets_tensor.append(temp_out)
offsets_attr.append(dim)
ipts['OffsetsTensor'] = new_offsets_tensor
attrs['offsets'] = offsets_attr
else:
for offset in offsets:
_attr_offsets_check(offset)
attrs['offsets'] = offsets
if isinstance(shape, Variable):
shape.stop_gradient = True
ipts['Shape'] = shape
elif utils._contain_var(shape):
new_shape_tensor = []
shape_attr = []
for dim_size in shape:
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
new_shape_tensor.append(dim_size)
shape_attr.append(0)
else:
_attr_shape_check(dim_size)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
shape_attr.append(dim_size)
ipts['ShapeTensor'] = new_shape_tensor
attrs['shape'] = shape_attr
else:
for dim_size in shape:
_attr_shape_check(dim_size)
attrs['shape'] = shape
helper.append_op(
type='crop_tensor',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def affine_grid(theta, out_shape, name=None):
"""
It generates a grid of (x,y) coordinates using the parameters of
the affine transformation that correspond to a set of points where
the input feature map should be sampled to produce the transformed
output feature map.
Args:
theta (Variable) - A Tensor with shape [N, 2, 3]. It contains a batch of affine transform parameters.
The data type can be float32 or float64.
out_shape (Variable | list | tuple): The shape of target output with format [batch_size, channel, height, width].
``out_shape`` can be a Tensor or a list or tuple. The data
type must be int32.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor with shape [batch_size, H, W, 2], where 'H' and 'W' are the height and width of the feature map in the affine transformation. The data type is the same as `theta`.
Raises:
ValueError: If the type of arguments is not supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
place = fluid.CPUPlace()
theta = fluid.data(name="x", shape=[None, 2, 3], dtype="float32")
out_shape = fluid.data(name="y", shape=[4], dtype="int32")
grid_0 = fluid.layers.affine_grid(theta, out_shape)
grid_1 = fluid.layers.affine_grid(theta, [5, 3, 28, 28])
batch_size=2
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
output= exe.run(feed={"x": np.random.rand(batch_size,2,3).astype("float32"),
"y": np.array([5, 3, 28, 28]).astype("int32")},
fetch_list=[grid_0.name, grid_1.name])
print(output[0])
print(output[1])
"""
helper = LayerHelper('affine_grid')
if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \
isinstance(out_shape, Variable)):
raise ValueError("The out_shape should be a list, tuple or Variable.")
if not isinstance(theta, Variable):
raise ValueError("The theta should be a Variable.")
out = helper.create_variable_for_type_inference(theta.dtype)
ipts = {'Theta': theta}
attrs = {}
if isinstance(out_shape, Variable):
ipts['OutputShape'] = out_shape
else:
attrs['output_shape'] = out_shape
helper.append_op(
type='affine_grid',
inputs=ipts,
outputs={'Output': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def pad2d(input,
paddings=[0, 0, 0, 0],
mode='constant',
pad_value=0.0,
data_format="NCHW",
name=None):
"""
Pad 2-d images according to 'paddings' and 'mode'.
If mode is 'reflect', paddings[0] and paddings[1] must be no greater
than height-1, and paddings[2] and paddings[3] must be no greater than width-1.
Parameters:
input (Variable): The input image with [N, C, H, W] format or [N, H, W, C] format, which is a 4-D Tensor with data type float32.
paddings (Variable | List[int32]): The padding size. If padding is a List, it must
contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
Otherwise, it is a 1-D Tensor with shape [4]. Data type is int32.
Default is [0, 0, 0, 0].
mode (str): Three modes: 'constant' (default), 'reflect', 'edge' .
When in 'constant' mode, this op uses a constant value to pad the input tensor.
When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
When in 'edge' mode, uses input boundaries to pad the input tensor.
Default is 'constant'
pad_value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0
data_format (str): A string from: "NHWC", "NCHW". Specifies the data format of
the input data.
Default is "NCHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns: A 4-D Tensor padded according to paddings and mode, with the same data type as the input.
Return Type: Variable
Examples:
.. code-block:: text
Input = [[[[1., 2., 3.],
[4., 5., 6.]]]]
Case 0:
paddings = [0, 1, 2, 3],
mode = 'constant'
pad_value = 0
Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
[0., 0., 4., 5., 6., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]]]]
Case 1:
paddings = [0, 1, 2, 1],
mode = 'reflect'
Out = [[[[3., 2., 1., 2., 3., 2.],
[6., 5., 4., 5., 6., 5.],
[3., 2., 1., 2., 3., 2.]]]]
Case 2:
paddings = [0, 1, 2, 1],
mode = 'edge'
Out = [[[[1., 1., 1., 2., 3., 3.],
[4., 4., 4., 5., 6., 6.],
[4., 4., 4., 5., 6., 6.]]]]
Code Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
result = fluid.layers.pad2d(input=data, paddings=[0, 1, 2, 3], mode='reflect')
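A further sketch (added for illustration; the names are placeholders), with the paddings given as a 1-D int32 Tensor and 'edge' mode:
.. code-block:: python
import paddle.fluid as fluid
img = fluid.data(name='img', shape=[None, 3, 32, 32], dtype='float32')
pads = fluid.layers.fill_constant(shape=[4], dtype='int32', value=1)
padded = fluid.layers.pad2d(input=img, paddings=pads, mode='edge')
# at runtime each image becomes 34 x 34 (one pixel of 'edge' padding on every side)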
"""
if in_dygraph_mode():
_paddings = paddings.numpy().tolist() if isinstance(
paddings, Variable) else paddings
return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
'data_format', data_format, 'paddings', _paddings)
attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
inputs = {'X': [input]}
if isinstance(paddings, Variable):
inputs['Paddings'] = [paddings]
attrs['paddings'] = []
else:
attrs['paddings'] = paddings
helper = LayerHelper('pad2d', **locals())
assert mode in ['reflect', 'edge', 'constant'
], "mode should be one of constant, reflect, edge."
dtype = helper.input_dtype(input_param_name='input')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
return out
@templatedoc()
def elu(x, alpha=1.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|1.0): ${alpha_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
input_elu = np.array([[-1,6],[1,15.6]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input_elu)
y = fluid.layers.elu(x, alpha=0.2)
print(y.numpy())
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
helper = LayerHelper('elu', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='elu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
@templatedoc()
def relu6(x, threshold=6.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
threshold(float, optional): ${threshold_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
output(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[-1,0],[2.5,7.8]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu6(x=x1, threshold=6.0)
print(out1.numpy())
# [[0. 0. ]
# [2.5 6. ]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='relu6',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
@templatedoc()
def pow(x, factor=1.0, name=None):
"""
This is Pow Activation Operator.
:math:`out = x^{factor}`
Args:
x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[32,32], dtype="float32")
# example 1: argument factor is float
y_1 = fluid.layers.pow(x, factor=2.0)
# y_1 is x^{2.0}
# example 2: argument factor is Variable
factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
y_2 = fluid.layers.pow(x, factor=factor_tensor)
# y_2 is x^{3.0}
"""
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'],
'pow')
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {}
if isinstance(factor, Variable):
check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
factor.stop_gradient = True
inputs['FactorTensor'] = factor
else:
attrs['factor'] = factor
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
scale_a(${scale_a_type}|0.67): ${scale_a_comment}
scale_b(${scale_b_type}|1.7159): ${scale_b_comment}
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
output(${out_type}): ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="input", shape=[-1, 3])
result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.random(size=(3, 3)).astype('float32')
output= exe.run(feed={"input": x},
fetch_list=[result])
print(output)
#[array([[0.626466 , 0.89842904, 0.7501062 ],
# [0.25147712, 0.7484996 , 0.22902708],
# [0.62705994, 0.23110689, 0.56902856]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
helper = LayerHelper('stanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='stanh',
inputs={'X': x},
outputs={'Out': out},
attrs={'scale_a': scale_a,
'scale_b': scale_b})
return out
@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
"""
${comment}
Parameters:
x (${x_type}): ${x_comment}
slope (float, optional): ${slope_comment}
offset (float, optional): ${offset_comment}
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hard_sigmoid')
helper = LayerHelper('hard_sigmoid', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_sigmoid',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': slope,
'offset': offset})
return out
@templatedoc()
def swish(x, beta=1.0, name=None):
"""
Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
Equation:
.. math::
out = \\frac{x}{1 + e^{- beta * x}}
Args:
x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
beta(float): Constant beta of swish operator, default 1.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.
Examples:
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
y = fluid.layers.swish(x, beta=2.0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
data = np.random.randn(2, 3).astype("float32")
exe.run(start)
y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
data
# array([[-1.1239197 , 1.3391294 , 0.03921051],
# [ 1.1970421 , 0.02440812, 1.2055548 ]], dtype=float32)
y_np
# array([[-0.2756806 , 1.0610548 , 0.01998957],
# [ 0.9193261 , 0.01235299, 0.9276883 ]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
data = np.random.randn(2, 3).astype("float32")
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = dg.to_variable(data)
y = fluid.layers.swish(x)
y_np = y.numpy()
data
# array([[-0.0816701 , 1.1603649 , -0.88325626],
# [ 0.7522361 , 1.0978601 , 0.12987892]], dtype=float32)
y_np
# array([[-0.03916847, 0.8835007 , -0.25835553],
# [ 0.51126915, 0.82324016, 0.06915068]], dtype=float32)
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': beta})
return out
def prelu(x, mode, param_attr=None, name=None):
"""
Equation:
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
There are three modes for the activation:
.. code-block:: text
all: All elements share same alpha.
channel: Elements in same channel share same alpha.
element: All elements do not share alpha. Each element has its own alpha.
Args:
x (Variable): The input Tensor or LoDTensor with data type float32.
mode (str): The mode for weight sharing.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight (alpha). It can be created by ParamAttr. None by default.
For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set, and
it is None by default.
Returns:
Variable:
output(Variable): The tensor or LoDTensor with the same shape as input.
The data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
mode = 'channel'
output = fluid.layers.prelu(
x,mode,param_attr=ParamAttr(name='alpha'))
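A further runnable sketch (added for illustration; the names are placeholders), using mode='all' so a single alpha (initialized to 0.25 by default) is shared by all elements:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
inp = fluid.data(name="inp", shape=[None, 3], dtype="float32")
out = fluid.layers.prelu(inp, mode='all')
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
x_np = np.array([[-1., 0., 2.]]).astype("float32")
res, = exe.run(feed={"inp": x_np}, fetch_list=[out])
print(res)  # [[-0.25, 0., 2.]] with the default alpha of 0.25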
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
helper = LayerHelper('prelu', **locals())
if mode not in ['all', 'channel', 'element']:
raise ValueError('mode should be one of all, channel, element.')
alpha_shape = [1]
if mode == 'channel':
alpha_shape = [1, x.shape[1], 1, 1]
elif mode == 'element':
alpha_shape = [1, x.shape[1], x.shape[2], x.shape[3]]
dtype = helper.input_dtype(input_param_name='x')
alpha = helper.create_parameter(
attr=helper.param_attr,
shape=alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(0.25))
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prelu",
inputs={"X": x,
'Alpha': alpha},
attrs={"mode": mode},
outputs={"Out": out})
return out
@templatedoc()
def brelu(x, t_min=0.0, t_max=24.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
t_min(${t_min_type}|0.0): ${t_min_comment}
t_max(${t_max_type}|24.0): ${t_max_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
input_brelu = np.array([[-1,6],[1,15.6]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input_brelu)
y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
print(y.numpy())
#[[ 1. 6.]
#[ 1. 10.]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')
helper = LayerHelper('brelu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='brelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'t_min': t_min,
't_max': t_max})
return out
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|0.02): ${alpha_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
output(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[2], dtype="float32")
res = fluid.layers.leaky_relu(x, alpha=0.1)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[-0.1, 2], [3, -0.4]]
"""
if in_dygraph_mode():
return core.ops.leaky_relu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'leaky_relu')
inputs = {'X': [x]}
attrs = {'alpha': alpha}
helper = LayerHelper('leaky_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='leaky_relu', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def soft_relu(x, threshold=40.0, name=None):
"""
SoftRelu Activation Operator.
$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$
Args:
x(Variable): Input of soft_relu operator. Data type can be float32, float64.
threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable(Tensor|LoDTensor): Output of the soft_relu operator, with the same shape and LoD as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.soft_relu(inputs, threshold=20.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'soft_relu')
helper = LayerHelper('soft_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='soft_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def flatten(x, axis=1, name=None):
"""
**Flatten op**
Flatten the input tensor into a 2D matrix.
For Example:
.. code-block:: text
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
axis = 2
We get:
Out.shape = (3 * 100, 4 * 100)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
axis = 0
We get:
Out.shape = (1, 3 * 100 * 100 * 4)
Args:
x (Variable): A tensor of rank >= axis. A tensor with type float32,
float64, int8, int32, int64.
axis (int): Indicate up to which input dimensions (exclusive) should
be flattened to the outer dimension of the output.
The value for axis must be in the range [0, R], where R
is the rank of the input tensor. Default: 1.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: A 2D tensor with the contents of the input tensor, with input \
dimensions up to axis flattened to the outer dimension of \
the output and remaining input dimensions flattened into the \
inner dimension of the output. A Tensor with type same as input x.
Raises:
ValueError: If x is not a variable.
ValueError: If axis is not in range [0, rank(x)].
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
# x shape is [4, 4, 3]
out = fluid.layers.flatten(x=x, axis=2)
# out shape is [16, 3]
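A further sketch (added for illustration) mirroring Case 2 above with axis=0:
.. code-block:: python
import paddle.fluid as fluid
y = fluid.data(name="y", shape=[4, 4, 3], dtype="float32")
out0 = fluid.layers.flatten(x=y, axis=0)
# out0 shape is [1, 48]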
"""
helper = LayerHelper('flatten', **locals())
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Variable")
if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
raise ValueError("The axis should be a int, and in range [0, rank(x)]")
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten2',
inputs={"X": x},
outputs={'Out': out,
'XShape': x_shape},
attrs={"axis": axis})
return out
def stack(x, axis=0):
"""
This OP stacks all the inputs :code:`x` along axis.
.. code-block:: text
Case 1:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 0
Output:
Out.dims = [3, 1, 2]
Out.data =[ [ [1.0, 2.0] ],
[ [3.0, 4.0] ],
[ [5.0, 6.0] ] ]
Case 2:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 1 or axis = -2
Output:
Out.shape = [1, 3, 2]
Out.data =[ [ [1.0, 2.0]
[3.0, 4.0]
[5.0, 6.0] ] ]
Args:
x (Variable|list(Variable)): Input :code:`x` can be a single Tensor, a :code:`list` of Tensors.
If :code:`x` is a :code:`list`, the shapes of all these Tensors
must be the same. Supposing the inputs are N-D
Tensors of shape :math:`[d_0, d_1, ..., d_{n-1}]`, the output is an (N+1)-D
Tensor of shape :math:`[d_0, d_1, ..., d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`.
Support data types: float32, float64, int32, int64.
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is :math:`[-(R+1), R+1)`.
R is the rank of the first tensor in ``x``. If ``axis`` < 0, :math:`axis=axis+rank(x[0])+1`.
The default value of axis is 0.
Returns:
Variable: The stacked Tensor, which has the same data type as the input Tensors. The output rank is :math:`rank(x[0])+1`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32')
x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32')
# stack Tensor list
data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2]
data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2]
# stack single Tensor
data = layers.stack(x1) # stack according to axis 0, data.shape=[1, None, 1, 2]
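A further runnable sketch (added for illustration; the tensors below are placeholders), stacking two constant tensors and fetching the result:
.. code-block:: python
import paddle.fluid as fluid
a = fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1.0)
b = fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=2.0)
stacked = fluid.layers.stack([a, b], axis=0)  # stacked.shape = [2, 1, 2]
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res, = exe.run(fetch_list=[stacked])
print(res)  # [[[1., 1.]], [[2., 2.]]]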
"""
helper = LayerHelper('stack', **locals())
axis = 0 if axis is None else axis
if not isinstance(x, list) and not isinstance(x, tuple):
x = [x]
out = helper.create_variable_for_type_inference(x[0].dtype)
if not in_dygraph_mode() and \
x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
"number of the elements must be 1, but received %s." % len(x)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': x[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': True})
else:
helper.append_op(
type='stack',
inputs={'X': x},
outputs={'Y': out},
attrs={'axis': axis})
return out
@templatedoc(op_type="filter_by_instag")
def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0):
"""
**Filter By Instag Layer**
This function filters a batch of instances (ins) by instance tags (instag).
There are multiple instances, and every instance belongs to some tags.
We can specify the tags we want, so the instances which belong to those tags
remain in the output, and the others are removed.
For example, one batch has 4 ins. Every ins has its tag list.
| Ins | Ins_Tag |
|:-----:|:------:|
| 0 | 0, 1 |
| 1 | 1, 3 |
| 2 | 0, 3 |
| 3 | 2, 6 |
And the LoD is [1,1,1,1],
and the filter tag is [1].
From the definition above, an instance which has tag 1 can pass the filter,
so Ins 0 and Ins 1 pass and appear in the output, while
Ins 2 and Ins 3 cannot pass because they do not have tag 1.
If is_lod is False, the input is a normal tensor, which is equivalent to
a LoDTensor whose LoD is all ones, similar to the example above.
Args:
ins (Variable): Input Variable (LoDTensor), usually a 2-D tensor
whose first dimension may or may not carry LoD info.
ins_tag (Variable): Input Variable (LoDTensor), usually a 1-D list of tags
that is split by the LoD info.
filter_tag (Variable): Input Variable (1-D Tensor/List), usually a
list that holds the wanted tags.
is_lod (Bool): Boolean value to indicate whether ins is a LoDTensor or not.
out_val_if_empty(Int64): If the output after filtering is empty, this value
will be set to the output tensor.
Returns:
Variable: filtered ins (LoDTensor) and loss weight (Tensor)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64')
ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64')
out, loss_weight = layers.filter_by_instag(ins, ins_tag, filter_tag, True)
"""
helper = LayerHelper('filter_by_instag', **locals())
out = helper.create_variable_for_type_inference(dtype=ins.dtype)
loss_weight = helper.create_variable_for_type_inference(dtype=np.float64)
mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype)
helper.append_op(
type='filter_by_instag',
inputs={'Ins': ins,
'Ins_tag': ins_tag,
'Filter_tag': filter_tag},
outputs={'Out': out,
'LossWeight': loss_weight,
'IndexMap': mmap},
attrs={'is_lod': is_lod,
'out_val_if_empty': out_val_if_empty})
return [out, loss_weight]
def unstack(x, axis=0, num=None):
"""
**UnStack Layer**
This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`.
If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
raised.
Args:
x (Variable): Input Tensor. It is an N-D Tensor with data type float32, float64, int32, or int64.
axis (int): The axis along which the input is unstacked.
num (int|None): The number of output variables.
Returns:
list(Variable): The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
Raises:
ValueError: If x.shape[axis] <= 0 or axis is not in range [-D, D).
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3, 5], dtype='float32') # create a tensor with shape=[2, 3, 5]
y = fluid.layers.unstack(x, axis=1) # unstack along the second axis, which results in 3 tensors with shape=[2, 5]
"""
helper = LayerHelper('unstack', **locals())
if num is None:
if axis is None or x.shape[axis] <= 0:
raise ValueError('unknown unstack number')
else:
num = x.shape[axis]
outs = []
for _ in range(num):
outs.append(helper.create_variable_for_type_inference(x.dtype))
helper.append_op(
type='unstack',
inputs={'X': [x]},
outputs={'Y': outs},
attrs={'axis': axis,
'num': num})
return outs
def expand(x, expand_times, name=None):
"""
This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
The rank of ``x`` should be less than or equal to 6. Please note that the size of ``expand_times`` must be the same
as the rank of ``x``. The following is an example:
.. code-block:: text
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
Attr(expand_times): [1, 2, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
Args:
x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``expand_times`` is a Variable, it should be a 1-D Tensor.
Expand times number for each dimension of ``x`` .
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
Raises:
TypeError: The type of ``expand_times`` must be list, tuple or Variable.
ValueError: The elements of ``expand_times`` cannot be negative.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
# the shape of expanded_1 is [2, 6, 2].
# example 2:
data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
# the shape of expanded_2 is [48, 56].
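A further runnable sketch (added for illustration; the names are placeholders), feeding concrete data for example 1 and fetching the expanded result:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
src = fluid.data(name="src", shape=[2, 3, 1], dtype="int32")
tiled = fluid.layers.expand(src, expand_times=[1, 2, 2])  # shape [2, 6, 2]
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
src_np = np.array([[[1], [2], [3]], [[4], [5], [6]]]).astype("int32")
out_np, = exe.run(feed={"src": src_np}, fetch_list=[tiled])
print(out_np.shape)  # (2, 6, 2)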
"""
if in_dygraph_mode():
if isinstance(expand_times, (list, tuple)):
if utils._contain_var(expand_times):
raise TypeError(
"The type of 'expand_times' in expand must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'expand_times' in expand must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
return core.ops.expand(x, 'expand_times', expand_times)
inputs = {"X": [x]}
attrs = {}
check_variable_and_dtype(
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
raise ValueError(
"expand op bool date type must set the stop_gradient to be False")
helper = LayerHelper('expand', input=x, **locals())
def get_attr_expand_times(list_expand_times):
attrs_expand_times = []
for idx, times in enumerate(list_expand_times):
if isinstance(times, Variable):
attrs_expand_times.append(-1)
else:
attrs_expand_times.append(times)
assert times > 0, (
"Each element given in expand_times must not be negative.")
return attrs_expand_times
def get_new_expand_times_tensor(list_expand_times):
new_expand_times_tensor = []
for ele in list_expand_times:
if isinstance(ele, Variable):
ele.stop_gradient = True
new_expand_times_tensor.append(ele)
else:
assert (isinstance(ele, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', ele, force_cpu=True, out=temp_out)
new_expand_times_tensor.append(temp_out)
return new_expand_times_tensor
if isinstance(expand_times, Variable):
expand_times.stop_gradient = True
inputs['ExpandTimes'] = expand_times
elif isinstance(expand_times, (list, tuple)):
attrs['expand_times'] = get_attr_expand_times(expand_times)
if utils._contain_var(expand_times):
inputs['expand_times_tensor'] = get_new_expand_times_tensor(
expand_times)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def expand_as(x, target_tensor, name=None):
"""
The expand_as operator tiles the input to match the given target tensor. The expansion
for each dimension is determined by the provided tensor 'target_tensor'. The rank of X
should be in [1, 6]. Please note that the rank of 'target_tensor' must be the same
as the rank of X. The following is an example:
.. code-block:: text
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
target_tensor's shape: [2, 6, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
Args:
x (Variable): A Tensor with dtype float64, float32, int32.
A tensor with rank in [1, 6].
target_tensor (Variable): A Tensor with dtype float64, float32, int32.
The target tensor that Input(X) is expanded to match. Only the shape of target_tensor is used.
Returns:
Variable: A Tensor with dtype float64, float32, int32.
After expanding, the size of each dimension of Output(Out) is equal to the size
of the corresponding dimension of target_tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
target_tensor = fluid.layers.data(
name="target_tensor", shape=[-1,20], dtype='float64')
result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,10)
y = np.random.rand(3,20)
output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
print(output[0].shape)
#(3,20)
"""
helper = LayerHelper('expand_as', input=x, **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
inputs = {'X': x, 'target_tensor': target_tensor}
helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
return out
from paddle.fluid.framework import convert_np_dtype_to_dtype_
@templatedoc()
def uniform_random_batch_size_like(input,
shape,
dtype='float32',
input_dim_idx=0,
output_dim_idx=0,
min=-1.0,
max=1.0,
seed=0):
"""
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max). The input_dim_idx is used to get the input dimension value, which will be used to resize the output dimension.
.. code-block:: text
*Case 1:
Given:
input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3]
shape=[2,4]
result.shape[output_dim_idx] = input.shape[input_dim_idx],
output_dim_idx = 0,
input_dim_idx = 0,
result.shape[0] = input.shape[0],
then:
result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4]
*Case 2:
Given:
input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3]
shape=[2,4]
input_dim_idx=1
output_dim_idx=1
result.shape[output_dim_idx] = input.shape[input_dim_idx],
output_dim_idx = 1,
input_dim_idx = 1,
result.shape[1] = input.shape[1],
then:
result=[[-0.23133647, -0.84195036, 0.21441269],
[-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3]
Args:
input (Variable): A Tensor. Supported data types: float32, float64.
shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0.
output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system. Note that if seed is not 0, this operator will always generate the same random numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
Returns:
Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
input = fluid.data(name="input", shape=[1, 3], dtype='float32')
out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
# example 2:
out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
"""
helper = LayerHelper('uniform_random_batch_size_like', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='uniform_random_batch_size_like',
inputs={'Input': input},
outputs={'Out': out},
attrs={
'shape': shape,
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'min': min,
'max': max,
'seed': seed,
'dtype': c_dtype
})
return out
@templatedoc()
def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
"""
Generate a random tensor whose data is drawn from a Gaussian distribution.
Args:
shape (Tuple[int] | List[int]): Shape of the generated random tensor.
mean (float): Mean of the random tensor, defaults to 0.0.
std (float): Standard deviation of the random tensor, defaults to 1.0.
seed (int): ${seed_comment}
dtype(np.dtype | core.VarDesc.VarType | str): Output data type, float32 or float64.
Returns:
Variable: Random tensor whose data is drawn from a Gaussian distribution, dtype: float32 or float64 as specified.
Examples:
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.layers.gaussian_random((2, 3), std=2., seed=10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
exe.run(start)
x_np, = exe.run(main, feed={}, fetch_list=[x])
x_np
# array([[2.3060477, 2.676496 , 3.9911983],
# [0.9990833, 2.8675377, 2.2279181]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10)
x_np = x.numpy()
x_np
# array([[2.3060477 , 2.676496 , 3.9911983 , 0.9990833 ],
# [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
"""
helper = LayerHelper('gaussian_random', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='gaussian_random',
outputs={'Out': out},
attrs={
'shape': shape,
'mean': mean,
'std': std,
'seed': seed,
'dtype': c_dtype,
'use_mkldnn': False
})
return out
@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
"""
This op samples one id per sample from the multinomial distribution given by each row of the input.
Parameters:
x (Variable): 2-D tensor, [batch_size, input_feature_dimensions]
min (Float): Minimum value, default 0.0.
max (Float): Maximum value, default 1.0.
seed (Float): Random seed, default 0. If seed is not 0, the op will generate the same numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str): The type of output data, e.g. float32, float16, int, etc.
Returns:
Variable: sampling tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name="X",
shape=[13, 11],
dtype='float32')
out = fluid.layers.sampling_id(x)
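A further runnable sketch (added for illustration; the names are placeholders). Each row of the input is treated as a categorical distribution, and one id is sampled per row:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
probs = fluid.data(name="probs", shape=[None, 4], dtype='float32')
ids = fluid.layers.sampling_id(probs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
p = np.random.rand(3, 4).astype('float32')
p /= p.sum(axis=1, keepdims=True)  # normalize each row so it sums to 1
ids_np, = exe.run(feed={"probs": p}, fetch_list=[ids])
print(ids_np)  # one sampled id per input row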
"""
helper = LayerHelper('sampling_id', **locals())
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='sampling_id',
inputs={'X': x},
outputs={'Out': out},
attrs={'min': min,
'max': max,
'seed': seed})
return out
@templatedoc()
def gaussian_random_batch_size_like(input,
shape,
input_dim_idx=0,
output_dim_idx=0,
mean=0.0,
std=1.0,
seed=0,
dtype='float32'):
"""
${comment}
Args:
input (Variable): ${input_comment}
shape (tuple|list): ${shape_comment}
input_dim_idx (int): ${input_dim_idx_comment}
output_dim_idx (int): ${output_dim_idx_comment}
mean (float): ${mean_comment}
std (float): ${std_comment}
seed (int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): The type of output data, float32 or float64.
Returns:
out (Variable): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[13, 11], dtype='float32')
out = fluid.layers.gaussian_random_batch_size_like(
input, shape=[-1, 11], mean=1.0, std=2.0)
"""
helper = LayerHelper('gaussian_random_batch_size_like', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='gaussian_random_batch_size_like',
inputs={'Input': input},
outputs={'Out': out},
attrs={
'shape': shape,
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'mean': mean,
'std': std,
'seed': seed,
'dtype': c_dtype
})
return out
@templatedoc()
def sum(x):
"""
${comment}
Case 1:
::
Input:
Input. Shape = [2, 3]
Input = [[1, 2, 3],
[4, 5, 6]]
Output:
The output. Shape = [2, 3]
Output = [[1, 2, 3],
[4, 5, 6]]
Case 2:
::
Input:
First input:
Input1. Shape = [2, 3]
Input1 = [[1, 2, 3],
[4, 5, 6]]
The second input:
Input2. Shape = [2, 3]
Input2 = [[7, 8, 9],
[10, 11, 12]]
Output:
The output. Shape = [2, 3]
Output = [[8, 10, 12],
[14, 16, 18]]
Args:
x (Variable|list(Variable)): ${x_comment}
Returns:
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
sum = fluid.layers.sum([input0, input1])
# You can print out 'sum' via executor.
out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program())
# The printed result is:
# 1570701754 the sum of input0 and input1: The place is:CPUPlace
# Tensor[sum_0.tmp_0]
# shape: [2,3,]
# dtype: l
# data: 8,8,8,8,8,8,
# the sum of input0 and input1 is 2-D Tensor with shape [2,3].
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
# and '__int64' on Windows. They all represent 64-bit integer variables.
"""
return paddle.elementwise_sum(x)
@templatedoc()
def slice(input, axes, starts, ends):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
axis :math:`i-1` (here 0 is the initial position).
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``.
Following examples will explain how slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000] # -1 denotes the reverse 0th position of dimension 0.
Then:
result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
Args:
input (Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
It is optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Variable.
TypeError: The type of ``ends`` must be list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name="input", shape=[4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor Variable.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends)
# sliced_1 is input[0:3, 0:2, 2:4].
# example 2:
# attr starts is a list which contain tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
# sliced_2 is input[0:3, 0:2, 2:4].
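A further sketch (added for illustration; the names are placeholders), with ``starts`` given as a 1-D int32 Tensor as described in the arguments above:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name="data", shape=[4, 5, 6], dtype='float32')
starts_t = fluid.layers.fill_constant(shape=[3], dtype="int32", value=0)
sliced_3 = fluid.layers.slice(data, axes=[0, 1, 2], starts=starts_t, ends=[3, 2, 4])
# sliced_3 is data[0:3, 0:2, 0:4]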
"""
if in_dygraph_mode():
infer_flags = list(1 for i in range(len(axes)))
if isinstance(starts, (list, tuple)):
if utils._contain_var(starts):
raise TypeError(
"The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
if isinstance(ends, (list, tuple)):
if utils._contain_var(ends):
raise TypeError(
"The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
return core.ops.slice(input, 'axes', axes, 'starts', starts, 'ends',
ends, 'infer_flags', infer_flags)
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple.")
if not isinstance(ends, (list, tuple, Variable)):
raise ValueError(
"Input ends must be an Variable, python list or tuple.")
helper = LayerHelper('slice', **locals())
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# infer_flags
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
@templatedoc()
def strided_slice(input, axes, starts, ends, strides):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
axis :math:`i-1` (here 0 is the initial position). The ``strides`` represents steps of
slicing and if the ``strides`` is negative, slice operation is in the opposite direction.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` , ``ends`` and ``strides``.
Following examples will explain how strided_slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
strides = [1, 1]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 3]
ends = [2, 0]
strides = [1, -1]
Then:
result = [ [8, 7, 6], ]
Case3:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000]
strides = [1, 3]
Then:
result = [ [2], ]
Args:
input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
It is optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
strides (list|tuple|Variable): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``strides`` is a Variable, it should be a 1-D Tensor.
It represents slice step of corresponding axis in ``axes``.
Returns:
Variable: A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is same as ``input``.
Raises:
ValueError: The type of ``starts`` must be list, tuple or Variable.
ValueError: The type of ``ends`` must be list, tuple or Variable.
ValueError: The type of ``strides`` must be list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name="input", shape=[3, 4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor Variable.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
strides_1 = [1, 1, 1]
strides_2 = [1, 1, 2]
sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1)
# sliced_1 is input[0:3:1, 0:2:1, 2:4:1, :].
# example 2:
# attr starts is a list which contain tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
# sliced_2 is input[0:3:1, 0:2:1, 2:4:2, :].
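# example 3 (a hypothetical extra sketch, not from the original docs):
# a negative stride walks the axis in reverse, as in Case2 above.
sliced_3 = fluid.layers.strided_slice(input, axes=[1], starts=[3], ends=[0], strides=[-1])
# sliced_3 is input[:, 3:0:-1, :, :], i.e. indices 3, 2, 1 along axis 1.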
"""
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple.")
if not isinstance(ends, (list, tuple, Variable)):
raise ValueError(
"Input ends must be an Variable, python list or tuple.")
if not isinstance(strides, (list, tuple, Variable)):
raise ValueError(
"Input strides must be an Variable, python list or tuple.")
helper = LayerHelper('strided_slice', **locals())
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
inputs = {'Input': input}
attrs = {
'axes': axes,
'starts': starts,
'ends': ends,
'strides': strides,
'infer_flags': infer_flags
}
else:
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# strides
if isinstance(strides, Variable):
strides.stop_gradient = True
inputs['StridesTensor'] = strides
elif isinstance(strides, (list, tuple)):
attrs['strides'] = []
if utils._contain_var(strides):
inputs['StridesTensorList'] = get_new_list_tensor(strides)
for i, dim in enumerate(strides):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = strides
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
def shape(input):
"""
**Shape Layer**
Get the shape of the input.
Args:
input (Variable): The input N-D Tensor. Datatype can be float32, float64, int32, int64.
Returns:
Variable (Tensor): A 1-D int32 Tensor holding the shape of the input variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[3, 100, 100], dtype="float32")
output = fluid.layers.shape(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.ones((3, 100, 100)).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([ 3, 100, 100], dtype=int32)]
"""
helper = LayerHelper('shape', **locals())
out = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='shape', inputs={'Input': input}, outputs={'Out': out})
return out
def rank(input):
"""
The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.
Args:
input (Variable): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
Returns:
Variable: A 0-D int32 Tensor holding the number of dimensions of the input variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[3, 100, 100], dtype="float32")
rank = fluid.layers.rank(input) # rank holds the value 3
"""
ndims = len(input.shape)
out = assign(np.array(ndims, 'int32'))
return out
def size(input):
"""
**Size Layer**
Returns the number of elements for a tensor, which is an int64 Tensor with shape [1].
Args:
input (Variable): The input variable.
Returns:
Variable: The number of elements for the input variable.
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
input = layers.data(
name="input", shape=[3, 100], dtype="float32", append_batch_size=False)
out = layers.size(input) # 300
"""
helper = LayerHelper('size', **locals())
out = helper.create_variable_for_type_inference(dtype='int64')
helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out})
return out
def _elementwise_op(helper):
op_type = helper.layer_type
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
return helper.append_activation(out)
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
Scale operator.
Putting scale and bias to the input Tensor as following:
``bias_after_scale`` is True:
.. math::
Out=scale*X+bias
``bias_after_scale`` is False:
.. math::
Out=scale*(X+bias)
Args:
x(Variable): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
scale(float|Variable): The scale factor of the input, it should be a float number or a Variable with shape [1] and data type as float32.
bias(float): The bias to be put on the input.
bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable(Tensor|LoDTensor): Output tensor of scale operator, with shape and data type same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
output = fluid.layers.scale(inputs, scale = 2.0, bias = 1.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)]
.. code-block:: python
# scale with parameter scale as Variable
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
scale = fluid.layers.data(name="scale", shape=[1], dtype='float32',
append_batch_size=False)
output = fluid.layers.scale(inputs, scale = scale, bias = 1.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
scale_np = np.array([2.]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img, 'scale':scale_np}, fetch_list=[output])
print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)]
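# A hypothetical extra sketch (not part of the original docs): with
# bias_after_scale=False the bias is added before scaling, i.e.
# Out = scale * (X + bias) instead of scale * X + bias.
output2 = fluid.layers.scale(inputs, scale=2.0, bias=1.0, bias_after_scale=False)
# For the same img, output2 would be [[ 4., 6., 8.], [10., 12., 14.]]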
"""
check_variable_and_dtype(
x, "x",
['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
"scale")
if in_dygraph_mode():
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
out = core.ops.scale(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
return dygraph_utils._append_activation_in_dygraph(out)
inputs = {'X': [x]}
attrs = {
'bias': float(bias),
'bias_after_scale': bias_after_scale,
}
if isinstance(scale, Variable):
inputs['ScaleTensor'] = [scale]
else:
attrs['scale'] = float(scale)
helper = LayerHelper('scale', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return helper.append_activation(out)
def elementwise_add(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_add(x, y)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [3., 8., 6.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_add(x, y, axis=1)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_add(x, y, axis=3)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_add')
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
def elementwise_div(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_div(x, y)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 0.6, 2.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=1)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=3)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_div')
return _elementwise_op(LayerHelper('elementwise_div', **locals()))
def elementwise_sub(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_sub(x, y)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [1., -2., 2.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=1)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=3)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub')
return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
def elementwise_mul(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_mul(x, y)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 15., 8.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_mul(x, y, axis=1)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_mul(x, y, axis=3)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mul')
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
def elementwise_max(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_max(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[2, 5, 4]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_max(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_max')
return _elementwise_op(LayerHelper('elementwise_max', **locals()))
def elementwise_min(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_min(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 2]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_min(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_min')
return _elementwise_op(LayerHelper('elementwise_min', **locals()))
def elementwise_pow(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_pow(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[2, 243, 16]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_pow')
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
def elementwise_mod(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 6, 5]).astype('int32')
}
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_mod(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 3]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mod')
return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 7, 5]).astype('int32')
}
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_floordiv(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[3, 2, 1]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_floordiv')
return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
for func in [
elementwise_add,
elementwise_div,
elementwise_sub,
elementwise_mul,
elementwise_max,
elementwise_pow,
elementwise_min,
elementwise_mod,
elementwise_floordiv,
]:
op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"axis (int32, optional): If X.dimension != Y.dimension, \
Y.dimension must be a subsequence of X.dimension. \
And axis is the start dimension index for broadcasting Y onto X. ",
"act (string, optional): Activation applied to the output. \
Default is None. Details: :ref:`api_guide_activations_en` ",
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
],
skip_attrs_set={"x_data_format", "y_data_format", "axis"
}) + """\n""" + str(func.__doc__)
for func in []:
op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"act (basestring|None): Activation applied to the output.",
"name (basestring|None): Name of the output."
])
func.__doc__ = func.__doc__ + """
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5)
x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32')
y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32')
z0 = fluid.layers.%s(x0, y0)
# example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5)
x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32')
y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32')
z1 = fluid.layers.%s(x1, y1)
# example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32')
y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32')
z2 = fluid.layers.%s(x2, y2, axis=2)
# example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32')
y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32')
z3 = fluid.layers.%s(x3, y3, axis=1)
# example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32')
y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32')
z4 = fluid.layers.%s(x4, y4, axis=0)
# example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32')
y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32')
z5 = fluid.layers.%s(x5, y5, axis=0)
""" % (func.__name__, func.__name__, func.__name__, func.__name__,
func.__name__, func.__name__)
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
check_variable_and_dtype(x, "x", ["bool"], op_name)
if y is not None:
check_variable_and_dtype(y, "y", ["bool"], op_name)
if out is not None:
check_variable_and_dtype(out, "out", [convert_dtype(x.dtype)], op_name)
helper = LayerHelper(op_name, **locals())
if binary_op:
assert x.dtype == y.dtype
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if binary_op:
helper.append_op(
type=op_name, inputs={"X": x,
"Y": y}, outputs={"Out": out})
else:
helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
return out
@templatedoc()
def logical_and(x, y, out=None, name=None):
"""
logical_and Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = X \land Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_and(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_and(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, False], [False, False]]
"""
return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_or(x, y, out=None, name=None):
"""
logical_or Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = X \lor Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_or(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_or(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, True], [False, True]]
"""
return _logical_op(
op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_xor(x, y, out=None, name=None):
"""
logical_xor Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = (X \lor Y) \land \lnot (X \land Y)
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_xor(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_xor(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[False, True], [False, True]]
"""
return _logical_op(
op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_not(x, out=None, name=None):
"""
logical_not Operator
It operates element-wise on X, and returns the Out. X and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = \lnot X
Args:
x(${x_type}): ${x_comment}
out(LoDTensor/Tensor): The LoDTensor/Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
res = fluid.layers.logical_not(x)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_not(x, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[False, True]]
"""
return _logical_op(
op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
@templatedoc()
def clip(x, min, max, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
min(float): ${min_comment}
max(float): ${max_comment}
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_comment}
Return Type:
${out_type}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[1], dtype='float32')
reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
"""
helper = LayerHelper("clip", **locals())
if name is None:
name = unique_name.generate_with_ignorable_key(".".join(
[helper.name, 'tmp']))
out = helper.create_variable(
type=x.type, name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="clip",
inputs={"X": x},
attrs={"min": min,
"max": max},
outputs={"Out": out})
return out
@templatedoc()
def clip_by_norm(x, max_norm, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
max_norm(${max_norm_type}): ${max_norm_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[None, 1], dtype='float32')
reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
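# A hypothetical numpy sketch (not part of the original docs) of the
# behaviour: if the L2 norm of x exceeds max_norm, x is rescaled so that
# its norm equals max_norm; otherwise x is returned unchanged.
import numpy as np
def np_clip_by_norm(x_np, max_norm):
    norm = np.sqrt((x_np ** 2).sum())
    return x_np if norm <= max_norm else x_np * (max_norm / norm)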
"""
helper = LayerHelper("clip_by_norm", **locals())
check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
if name is None:
name = unique_name.generate_with_ignorable_key(".".join(
[helper.name, 'tmp']))
out = helper.create_variable(
type=x.type, name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="clip_by_norm",
inputs={"X": x},
attrs={"max_norm": max_norm},
outputs={"Out": out})
return out
@templatedoc()
def mean(x, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
name(basestring|None): Name of the output.
Returns:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(
name='data', shape=[2, 3], dtype='float32')
mean = fluid.layers.mean(input)
"""
if in_dygraph_mode():
return core.ops.mean(x)
helper = LayerHelper("mean", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
return out
@templatedoc()
def merge_selected_rows(x, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
name(basestring|None): Name of the output.
Returns:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
var = b.create_var(
name="X", dtype="float32", persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
y = fluid.layers.merge_selected_rows(var)
"""
helper = LayerHelper("merge_selected_rows", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="merge_selected_rows",
inputs={"X": x},
attrs={},
outputs={"Out": out})
return out
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
"""
Mul Operator.
This operator is used to perform matrix multiplication for input $x$ and $y$.
The equation is:
.. math::
Out = x * y
Both the input $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $x$.
Args:
x (Variable): The first input Tensor/LoDTensor of mul_op.
y (Variable): The second input Tensor/LoDTensor of mul_op.
x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs.
If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional
matrix first. The flattening rule is: the first `num_col_dims` dimensions will be flattened to form the
first dimension of the final matrix (the height of the matrix), and the rest `rank(x) - num_col_dims`
dimensions are flattened to form the second dimension of the final matrix (the width of the matrix).
As a result, the height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims`
dimensions' sizes, and the width of the flattened matrix is equal to the product of $x$'s last
`rank(x) - num_col_dims` dimensions' sizes. For example, suppose $x$ is a 5-dimensional tensor with the
shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Thus, the flattened matrix will have a shape
[2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first. The attribute `y_num_col_dims` determines how $y$ is flattened. See comments of `x_num_col_dims` for more details. Default is 1.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.
Examples:
.. code-block:: python
import paddle.fluid as fluid
dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32")
dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32")
output = fluid.layers.mul(dataX, dataY,
x_num_col_dims = 1,
y_num_col_dims = 1)
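# A hypothetical extra sketch (not part of the original docs): with inputs
# of more than two dimensions, x_num_col_dims controls how x is flattened.
# Here x of shape [2, 3, 4] is flattened to [2 x 3, 4] = [6, 4] before the
# matrix multiplication, so the final output shape is [2, 3, 5].
dataA = fluid.layers.data(name="dataA", append_batch_size=False, shape=[2, 3, 4], dtype="float32")
dataB = fluid.layers.data(name="dataB", append_batch_size=False, shape=[4, 5], dtype="float32")
out2 = fluid.layers.mul(dataA, dataB, x_num_col_dims=2, y_num_col_dims=1)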
"""
if in_dygraph_mode():
return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
'y_num_col_dims', y_num_col_dims)
inputs = {"X": [x], "Y": [y]}
attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
helper = LayerHelper("mul", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="mul", inputs={"X": x,
"Y": y}, attrs=attrs, outputs={"Out": out})
return out
@templatedoc()
def maxout(x, groups, name=None, axis=1):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
groups(int): ${groups_comment}
axis(int, optional): ${axis_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: ${out_comment}
Raises:
ValueError: If `axis` is not 1, -1 or 3.
ValueError: If the number of input channels is not divisible by `groups`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data',
shape=[None, 256, 32, 32],
dtype='float32')
out = fluid.layers.maxout(input, groups=2)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
helper = LayerHelper("maxout", **locals())
if axis not in [1, -1, 3]:
raise ValueError(
"Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
"Attr(axis): %s." % str(axis))
if axis == -1:
axis = 3
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="maxout",
inputs={"X": x},
attrs={"groups": groups,
"axis": axis},
outputs={"Out": out})
return out
def space_to_depth(x, blocksize, name=None):
"""
Gives a blocksize to space_to_depth the input LoDTensor with Layout: [batch, channel, height, width]
This op rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of \
the input LoDTensor where values from the height and width dimensions are moved to the channel \
dimension.
The attr blocksize indicates the input block size.
space_to_depth will reorganize the elements of input with shape [batch, channel, height, width] \
according to blocksize to construct output with shape \
[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]:
- Non-overlapping blocks of size blocksize x blocksize are rearranged into depth at each location.
- The Y, X coordinates within each block of the input become the high order component of the output channel index
- channel should be divisible by the square of blocksize
- height, width should be divisible by blocksize
This OP is useful for resizing the activations between convolutions \
(but keeping all data)
.. code-block:: text
Given the input x with the shape [1, 1, 4, 4]:
x.data = [[[[1, 2, 5, 6],
[3, 4, 7, 8],
[9, 10, 13, 14],
[11, 12, 15, 16]]]]
blocksize = 2
then get the output with the shape [1, 4, 2, 2]:
out.data = [[[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]],
[[13, 14], [15, 16]]]]
Args:
x (Variable): The input, which should be a 4-D Tensor or LoDTensor, with the shape \
[batch, channel, height, width]
blocksize (int): The blocksize to select the element on each feature map. It should be >= 2
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The output, which should be a 4-D Tensor or LoDTensor, with the shape \
[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]
Return Type: Variable
Raises:
ValueError: If blocksize is not a Python int.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(
name='data', shape=[1, 4, 2, 2], dtype='float32')
space_to_depthed = fluid.layers.space_to_depth(
x=data, blocksize=2)
exe = fluid.Executor(fluid.CPUPlace())
data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32')
print(data_np)
#array([[[[ 0., 1.], [ 2., 3.]],
# [[ 4., 5.], [ 6., 7.]],
# [[ 8., 9.], [10., 11.]],
# [[12., 13.], [14., 15.]]]], dtype=float32)
out_main = exe.run(fluid.default_main_program(),
feed={'data': data_np},
fetch_list=[space_to_depthed])
print(out_main)
#[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]],
# [[ 8.]], [[12.]], [[ 9.]], [[13.]],
# [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]],
# [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)]
"""
helper = LayerHelper("space_to_depth", **locals())
if not (isinstance(blocksize, int)):
raise ValueError("blocksize must be a python Int")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="space_to_depth",
inputs={"X": x},
attrs={"blocksize": blocksize},
outputs={"Out": out})
return out
def affine_channel(x,
scale=None,
bias=None,
data_layout='NCHW',
name=None,
act=None):
"""
Applies a separate affine transformation to each channel of the input.
Useful for replacing spatial batch norm with its equivalent fixed
transformation. The input can also be a 2-D tensor, in which case the affine
transformation is applied along the second dimension.
Args:
x (Variable): Feature map input can be a 4D tensor with order NCHW
or NHWC. It also can be a 2D tensor and the affine transformation
is applied in the second dimension.The data type is float32 or float64.
scale (Variable): 1D input of shape (C), the c-th element is the scale
factor of the affine transformation for the c-th channel of
the input.The data type is float32 or float64.
bias (Variable): 1D input of shape (C), the c-th element is the bias
of the affine transformation for the c-th channel of the input.
The data type is float32 or float64.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
data_layout.
name (str, default None): The name of this layer. For more information,
please refer to :ref:`api_guide_Name` .
act (str, default None): Activation to be applied to the output of this layer.
Returns:
Variable: A tensor which has the same shape, data layout and data type with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
default_initializer=fluid.initializer.Constant(2.0))
input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
default_initializer=fluid.initializer.Constant(0.5))
out = fluid.layers.affine_channel(data,scale=input_scale,
bias=input_bias)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_array] = exe.run(test_program,
fetch_list=out,
feed={'data': np.ones([1,1,2,2]).astype('float32')})
# out_array is [[[[2.5, 2.5],
# [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
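# A hypothetical numpy check (not part of the original docs): for NCHW
# input the op is a per-channel affine transform, out = x * scale + bias
# with scale and bias broadcast over the channel dimension.
ref = np.ones([1, 1, 2, 2]).astype('float32') * 2.0 + 0.5
# ref equals out_array above: [[[[2.5, 2.5], [2.5, 2.5]]]]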
"""
helper = LayerHelper("affine_channel", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="affine_channel",
inputs={"X": x,
'Scale': scale,
'Bias': bias},
attrs={"data_layout": data_layout},
outputs={"Out": out})
return helper.append_activation(out)
def similarity_focus(input, axis, indexes, name=None):
"""
SimilarityFocus Operator
Generate a similarity focus mask with the same shape as the input using the following method:
1. Extract the 3-D tensor(here the first dimension is BatchSize) corresponding
to the axis according to the indexes. For example, if axis=1 and indexes=[a],
it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
is (BatchSize, A, B, C), the shape of tensor T is (BatchSize, B, C).
2. For each index, find the largest numbers in the tensor T, so that each
row and each column contains at most one selected number (that is, once the
largest number has been found in the i-th row and the j-th column, the
remaining numbers in the i-th row and the j-th column are skipped, and the
next largest number is selected from the remaining numbers; obviously
min(B, C) numbers will be selected), and mark the corresponding positions of the
3-D similarity focus mask as 1, otherwise as 0. Do an elementwise-or over
the masks of all indexes.
3. Broadcast the 3-D similarity focus mask to the same shape of input X.
Refer to `Similarity Focus Layer <http://www.aclweb.org/anthology/N16-1108>`_
.. code-block:: text
* Example :
Given a 4-D tensor x with the shape (BatchSize, C, A, B), where C is
the number of channels and the shape of feature map is (A, B):
x.shape = (2, 3, 2, 2)
x.data = [[[[0.8, 0.1],
[0.4, 0.5]],
[[0.9, 0.7],
[0.9, 0.9]],
[[0.8, 0.9],
[0.1, 0.2]]],
[[[0.2, 0.5],
[0.3, 0.4]],
[[0.9, 0.7],
[0.8, 0.4]],
[[0.0, 0.2],
[0.4, 0.7]]]]
Given axis: 1 (the axis of the channel)
Given indexes: [0]
then we get a 4-D tensor out with the same shape of input x:
out.shape = (2, 3, 2, 2)
out.data = [[[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]]],
[[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]]]]
Args:
input(Variable): The input tensor variable. It should
be a 4-D tensor with shape [BatchSize, A, B, C]. Data type is
float32 or float64.
axis(int): Indicating the dimension to be selected. It can only be
1, 2 or 3.
indexes(list): Indicating the indexes of the selected dimension.
Returns:
Variable: A tensor variable with the same shape and same type \
as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(
name='data', shape=[-1, 3, 2, 2], dtype='float32')
fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
"""
helper = LayerHelper('similarity_focus', **locals())
# check attrs
if isinstance(axis, int) is False:
raise TypeError("axis must be int type.")
if isinstance(indexes, list) is False:
raise TypeError("indexes must be list type.")
if axis != 1 and axis != 2 and axis != 3:
raise ValueError("axis must be 1, 2 or 3.")
if len(indexes) == 0:
raise ValueError("indexes can not be empty.")
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='similarity_focus',
inputs={'X': input},
outputs={'Out': out},
attrs={"axis": axis,
"indexes": indexes})
return out
def hash(input, hash_size, num_hash=1, name=None):
"""
This OP hashes the input to an integer less than hash_size.
The hash algorithm used is xxHash - Extremely fast hash algorithm
(https://github.com/Cyan4973/xxHash/tree/v0.6.5)
Args:
input(Variable): A **Two-Dimensional** LoDTensor with type int32, int64.
**Only support LoDTensor**.
hash_size(int): The space size of the hash algorithm. Each hashed value is taken modulo hash_size, so it lies in the range :math:`[0, hash_size)`.
num_hash(int, optional): The number of times to hash, default is 1.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A LoDTensor with the same data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
place = fluid.core.CPUPlace()
x = fluid.data(name="x", shape=[1], dtype="int32", lod_level=1)
res = fluid.layers.hash(name="res",input=x, hash_size=1000, num_hash=4)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
in1 = np.array([[1,2],[3,4]]).astype("int32")
print(in1)
x_i = fluid.core.LoDTensor()
x_i.set(in1,place)
x_i.set_recursive_sequence_lengths([[0,2]])
res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
print(np.array(res[0]))
# [[[722]
# [407]
# [337]
# [395]]
# [[603]
# [590]
# [386]
# [901]]]
"""
helper = LayerHelper('hash', **locals())
out = helper.create_variable_for_type_inference(
helper.input_dtype(), stop_gradient=True)
helper.append_op(
type='hash',
inputs={'X': input},
outputs={'Out': out},
attrs={'num_hash': num_hash,
'mod_by': hash_size})
return out
@templatedoc()
def grid_sampler(x, grid, name=None):
"""
This operation samples input X by using bilinear interpolation based on
flow field grid, which is usually generated by :code:`affine_grid` . The grid of
shape [N, H, W, 2] is the concatenation of (x, y) coordinates
with shape [N, H, W] each, where x is indexing the 4th dimension
(in width dimension) of input data x and y is indexing the 3rd
dimension (in height dimension), finally results is the bilinear
interpolation value of 4 nearest corner points. The output tensor
shape will be [N, C, H, W].
.. code-block:: text
Step 1:
Get (x, y) grid coordinates and scale them to [0, H-1] / [0, W-1].
.. code-block:: text
grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)
Step 2:
Indices input data X with grid (x, y) in each [H, W] area, and bilinear
interpolate point value by 4 nearest points.
wn ------- y_n ------- en
| | |
| d_n |
| | |
x_w --d_w-- grid--d_e-- x_e
| | |
| d_s |
| | |
ws ------- y_s ------- es
x_w = floor(x) // west side x coord
x_e = x_w + 1 // east side x coord
y_n = floor(y) // north side y coord
y_s = y_n + 1 // south side y coord
d_w = grid_x - x_w // distance to west side
d_e = x_e - grid_x // distance to east side
d_n = grid_y - y_n // distance to north side
d_s = y_s - grid_y // distance to south side
wn = X[:, :, y_n, x_w] // north-west point value
en = X[:, :, y_n, x_e] // north-east point value
ws = X[:, :, y_s, x_w] // south-west point value
es = X[:, :, y_s, x_e] // south-east point value
output = wn * d_e * d_s + en * d_w * d_s
+ ws * d_e * d_n + es * d_w * d_n
Args:
x(Variable): The input tensor, which is a 4-D tensor with shape
[N, C, H, W], N is the batch size, C is the channel
number, H and W is the feature height and width.
The data type is float32 or float64.
grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: Output of shape [N, C, H, W] data samples input X
using bilinear interpolation based on input grid.
The data type is same as input tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# use with affine_grid
x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
out = fluid.layers.grid_sampler(x=x, grid=grid)
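# A hypothetical extra sketch (not part of the original docs): the grid can
# also be fed directly as a [N, H, W, 2] tensor with values in [-1, 1].
grid_data = fluid.data(name='grid', shape=[None, 32, 32, 2], dtype='float32')
out2 = fluid.layers.grid_sampler(x=x, grid=grid_data)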
"""
helper = LayerHelper("grid_sampler", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
'grid_sampler')
if not isinstance(x, Variable):
return ValueError("The x should be a Variable")
if not isinstance(grid, Variable):
return ValueError("The grid should be a Variable")
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x, 'Grid': grid}
helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out})
return out
def log_loss(input, label, epsilon=1e-4, name=None):
"""
**Negative Log Loss Layer**
This layer accepts input predictions and target label and returns the
negative log loss.
.. math::
Out = -label * \\log{(input + \\epsilon)}
- (1 - label) * \\log{(1 - input + \\epsilon)}
Args:
input (Variable|list): A 2-D tensor with shape [N x 1], where N is the
batch size. This input is a probability computed
by the previous operator. Data type float32.
label (Variable|list): The ground truth which is a 2-D tensor with
shape [N x 1], where N is the batch size.
Data type float32.
epsilon (float, optional): A small number for numerical stability. Default 1e-4.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Returns:
Variable: A 2-D tensor with shape [N x 1], the negative log loss.
Examples:
.. code-block:: python
import paddle.fluid as fluid
label = fluid.data(name='label', shape=[None, 1], dtype='float32')
prob = fluid.data(name='prob', shape=[None, 1], dtype='float32')
cost = fluid.layers.log_loss(input=prob, label=label)
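# A hypothetical extra sketch (not part of the original docs): running the
# graph above to check the formula for a single prediction.
import numpy as np
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
cost_val, = exe.run(fluid.default_main_program(),
                    feed={'prob': np.array([[0.7]], dtype='float32'),
                          'label': np.array([[1.0]], dtype='float32')},
                    fetch_list=[cost])
# cost_val is approximately [[0.3565]], since -log(0.7 + 1e-4) ~= 0.3565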
"""
helper = LayerHelper('log_loss', **locals())
loss = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='log_loss',
inputs={'Predicted': [input],
'Labels': [label]},
outputs={'Loss': [loss]},
attrs={'epsilon': epsilon})
return loss
def add_position_encoding(input, alpha, beta, name=None):
"""
This operator performs weighted sum of input feature at each position
(position in the sequence) and the corresponding position encoding.
For more details of position encoding, please refer to `Attention Is All You
Need <http://arxiv.org/pdf/1706.03762.pdf>`_ .
The formula is as follows:
.. math::
PE(pos, 2i) &= \\sin{(pos / 10000^{2i / P})} \\\\
PE(pos, 2i + 1) &= \\cos{(pos / 10000^{2i / P})} \\\\
Out(:, pos, i) &= \\alpha * input(:, pos, i) + \\beta * PE(pos, i)
Where:
- :math:`PE(pos, 2i)` : the value at even index `2i` for encoding of position `pos`.
- :math:`PE(pos, 2i + 1)` : the value at odd index `2i+1` for encoding of position `pos`
Args:
input(Variable): A Tensor or LoDTensor (lod level is 1). If it is a
Tensor, the shape should be `[N, M, P]`, where `N` stands for
batch size, `M` for sequence length, `P` for the size of feature
dimension. If it is a LoDTensor, the shape should be `[N, P]`,
where `N` stands for the total sequence lengths in this mini-batch,
`P` for the size of feature. The data type should be float32 or float64.
alpha(float): Indicate the weight coefficient for `input` when performing
weighted sum.
beta(float): Indicate the weight coefficient for position encoding when
performing weighted sum.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: A Tensor or LoDTensor. It has the same shape, data type and lod as `input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
tensor = fluid.data(
name='tensor',
shape=[None, 64, 512],
dtype='float32')
position_tensor = fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0)
"""
helper = LayerHelper('add_position_encoding', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type="add_position_encoding",
inputs={"X": input},
outputs={"Out": out},
attrs={"alpha": alpha,
"beta": beta})
return out
def bilinear_tensor_product(x,
y,
size,
act=None,
name=None,
param_attr=None,
bias_attr=None):
"""
**Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N].
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Args:
x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
is float32 or float64.
y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
should be same as **x**.
size (int): The dimension of this layer.
act (str|None): Activation to be applied to the output of this layer. Default None.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr|None): To specify the bias parameter attribute.
Default: None, which means the default bias parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
Returns:
Variable: A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.
Examples:
.. code-block:: python
import paddle.fluid as fluid
layer1 = fluid.data("t1", shape=[-1, 5], dtype="float32")
layer2 = fluid.data("t2", shape=[-1, 4], dtype="float32")
tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000)
"""
helper = LayerHelper('bilinear_tensor_product', **locals())
dtype = helper.input_dtype('x')
param_shape = [size, x.shape[1], y.shape[1]]
w = helper.create_parameter(
attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
out = helper.create_variable_for_type_inference(dtype=dtype)
inputs = {"X": x, "Y": y, "Weight": w}
if helper.bias_attr:
bias_size = [1, size]
bias = helper.create_parameter(
attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
inputs["Bias"] = bias
helper.append_op(
type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})
# add activation
return helper.append_activation(out)
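# Illustrative sketch (assumption, not the op's implementation): a NumPy reference
# for the bilinear tensor product out_i = x * W_i * y^T, computed per sample.
def _bilinear_tensor_product_reference(x_np, y_np, w_np):
    # x_np: [batch, M], y_np: [batch, N], w_np: [size, M, N]
    # out[b, i] = x_np[b] @ w_np[i] @ y_np[b]
    return np.einsum('bm,imn,bn->bi', x_np, w_np, y_np)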
@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
"""
This operator gets tensor data from input with SelectedRows type, and outputs a LoDTensor.
.. code-block:: text
input x is SelectedRows:
x.rows = [0, 5, 5, 4, 19]
x.height = 20
x.value = [[1, 1] [2, 2] [2, 2] [3, 3] [6, 6]]
Output is LoDTensor:
out.shape = [5, 2]
out.data = [[1, 1],
[2, 2],
[2, 2],
[3, 3],
[6, 6]]
Args:
x(SelectedRows): Input with SelectedRows type. The data type is float32, float64, int32 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor transformed from SelectedRows. The data type is same with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
out = fluid.layers.get_tensor_from_selected_rows(input)
"""
helper = LayerHelper('get_tensor_from_selected_rows', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='get_tensor_from_selected_rows',
inputs={'X': x},
outputs={'Out': out},
attrs={})
return out
def shuffle_channel(x, group, name=None):
"""
This operator shuffles the channels of input x.
It divides the input channels into :attr:`group` subgroups,
and obtains a new order by selecting one element from each subgroup in turn.
Please refer to the paper
https://arxiv.org/pdf/1707.01083.pdf
.. code-block:: text
Given a 4-D tensor input with the shape (N, C, H, W):
input.shape = (1, 4, 2, 2)
input.data =[[[[0.1, 0.2],
[0.2, 0.3]],
[[0.3, 0.4],
[0.4, 0.5]],
[[0.5, 0.6],
[0.6, 0.7]],
[[0.7, 0.8],
[0.8, 0.9]]]]
Given group: 2
then we get a 4-D tensor out with the same shape as the input:
out.shape = (1, 4, 2, 2)
out.data = [[[[0.1, 0.2],
[0.2, 0.3]],
[[0.5, 0.6],
[0.6, 0.7]],
[[0.3, 0.4],
[0.4, 0.5]],
[[0.7, 0.8],
[0.8, 0.9]]]]
Args:
x(Variable): The input tensor variable. It should be a 4-D tensor with shape [N, C, H, W]
group(int): The number of subgroups. It should evenly divide the number of channels.
Returns:
out(Variable): the channels shuffling result is a tensor variable with the
same shape and same type as the input.
Raises:
TypeError: If group is not an int.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
out = fluid.layers.shuffle_channel(x=input, group=2)
"""
helper = LayerHelper("shuffle_channel", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(group, int):
raise TypeError("group must be int type")
helper.append_op(
type="shuffle_channel",
inputs={"X": x},
outputs={"Out": out},
attrs={"group": group})
return out
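# Illustrative sketch (assumption): channel shuffle expressed as a NumPy
# reshape -> transpose -> reshape, matching the worked example in the docstring above.
def _shuffle_channel_reference(x_np, group):
    n, c, h, w = x_np.shape
    x_np = x_np.reshape(n, group, c // group, h, w)
    x_np = x_np.transpose(0, 2, 1, 3, 4)
    return x_np.reshape(n, c, h, w)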
@templatedoc()
def temporal_shift(x, seg_num, shift_ratio=0.25, name=None):
"""
**Temporal Shift Operator**
${comment}
Args:
x(Variable): ${x_comment}
seg_num(int): ${seg_num_comment}
shift_ratio(float): ${shift_ratio_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
out(Variable): The temporal shifting result is a tensor variable with the
same shape and same data type as the input.
Raises:
TypeError: seg_num must be int type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
out = fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
"""
helper = LayerHelper("temporal_shift", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
check_type(seg_num, 'seg_num', int, 'temporal_shift')
check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(seg_num, int):
raise TypeError("seg_num must be int type.")
helper.append_op(
type="temporal_shift",
inputs={"X": x},
outputs={"Out": out},
attrs={"seg_num": seg_num,
"shift_ratio": shift_ratio})
return out
class PyFuncRegistry(object):
_register_funcs = []
def __init__(self, func):
if func is None or not callable(func):
raise TypeError('func must be a Python function')
self._func = func
# find named args using reflection
args = inspect.getargspec(self._func)
if len(args[0]) == 0 and args[1] is None and args[2] is None:
# Function with no inputs
self._named_args = None
else:
self._named_args = args[0]
self._id = core._append_python_callable_object_and_return_id(self)
'''
Why record self here?
1. For debug usage. Users can call
:code:`py_func.registered_func(idx)` method
to find the registered function corresponding
to :code:`idx`.
2. For increasing the reference count of self.
It seems that releasing a Python object
whose reference count is 1 would cause a
segmentation fault error on the C++ side.
Maybe this is due to the lack of Python GC on the C++ side?
'''
PyFuncRegistry._register_funcs.append(self)
@classmethod
def registered_func(cls, idx):
return cls._register_funcs[idx]._func
@classmethod
def registered_func_num(cls):
return len(cls._register_funcs)
@property
def id(self):
return self._id
def __call__(self, *args):
if self._named_args is None:
func_ret = self._func()
else:
kwargs = dict()
idx = 0
for arg in self._named_args:
kwargs[arg] = args[idx]
idx += 1
func_ret = self._func(*args[idx:], **kwargs)
if not isinstance(func_ret, (list, tuple)):
func_ret = (func_ret, )
ret = []
for each_ret in func_ret:
if each_ret is None or isinstance(each_ret, core.LoDTensor):
ret.append(each_ret)
continue
if not isinstance(each_ret, np.ndarray):
each_ret = np.array(each_ret)
tensor = core.LoDTensor()
tensor.set(each_ret, core.CPUPlace())
ret.append(tensor)
return tuple(ret)
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
"""
This OP is used to register a customized Python OP to Paddle Fluid. The design
principle of py_func is that LoDTensor and numpy array can be converted to each
other easily, so you can use Python and numpy API to register a Python OP.
The forward function of the registered OP is ``func`` and the backward function
of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
call ``backward_func`` at backward runtime(if ``backward_func`` is not None).
``x`` is the input of ``func``, whose type must be LoDTensor; ``out`` is
the output of ``func``, whose type can be either LoDTensor or numpy array.
The input of the backward function ``backward_func`` is ``x``, ``out`` and
the gradient of ``out``. If some variables of ``out`` have no gradient, the
relevant input variable of ``backward_func`` is None. If some variables of
``x`` do not have a gradient, the user should return None in ``backward_func``.
The data type and shape of ``out`` should also be set correctly before this
API is called, and the data type and shape of the gradient of ``out`` and
``x`` will be inferred automatically.
This API can also be used to debug the neural network by setting the ``func``
as a function that only print variables.
Args:
func (callable): The forward function of the registered OP. When the network
is running, the forward output ``out`` will be calculated according to this
function and the forward input ``x``. In ``func`` , it's suggested that we
actively convert LoDTensor into a numpy array, so that we can use Python and
numpy API arbitrarily. If not, some operations of numpy may not be compatible.
x (Variable|tuple(Variable)|list[Variable]): The input of the forward function ``func``.
It can be Variable|tuple(Variable)|list[Variable], where Variable is LoDTensor or
Tensor. In addition, multiple Variables should be passed in the form of tuple(Variable)
or list[Variable].
out (Variable|tuple(Variable)|list[Variable]): The output of the forward function ``func``,
it can be Variable|tuple(Variable)|list[Variable], where Variable can be either LoDTensor
or numpy array. Since Paddle cannot automatically infer the shape and type of ``out``,
you must create ``out`` in advance.
backward_func (callable, optional): The backward function of the registered OP.
Its default value is None, which means there is no reverse calculation. If
it is not None, ``backward_func`` is called to calculate the gradient of
``x`` when the network is at backward runtime.
skip_vars_in_backward_input (Variable, optional): It's used to limit the input
variable list of ``backward_func``, and it can be Variable|tuple(Variable)|list[Variable].
It must belong to either ``x`` or ``out``. The default value is None, which means
that no variables need to be removed from ``x`` and ``out``. If it is not None,
these variables will not be the input of ``backward_func``. This parameter is only
useful when ``backward_func`` is not None.
Returns:
Variable|tuple(Variable)|list[Variable]: The output ``out`` of the forward function ``func``.
Examples:
.. code-block:: python
# example 1:
import paddle.fluid as fluid
import six
import numpy as np
# Creates a forward function, LoDTensor can be input directly without
# being converted into numpy array.
def tanh(x):
return np.tanh(x)
# Skip x in backward function and return the gradient of x
# LoDTensor must be actively converted to numpy array, otherwise,
# operations such as +/- can't be used.
def tanh_grad(y, dy):
return np.array(dy) * (1 - np.square(np.array(y)))
# Creates a forward function for debugging running networks(print value)
def debug_func(x):
print(x)
def create_tmp_var(name, dtype, shape):
return fluid.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def simple_net(img, label):
hidden = img
for idx in six.moves.range(4):
hidden = fluid.layers.fc(hidden, size=200)
new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
dtype=hidden.dtype, shape=hidden.shape)
# User-defined forward and backward
hidden = fluid.layers.py_func(func=tanh, x=hidden,
out=new_hidden, backward_func=tanh_grad,
skip_vars_in_backward_input=hidden)
# User-defined debug functions that print out the input LodTensor
fluid.layers.py_func(func=debug_func, x=hidden, out=None)
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
return fluid.layers.mean(loss)
# example 2:
# This example shows how to turn LoDTensor into numpy array and
# use numpy API to register a Python OP
import paddle.fluid as fluid
import numpy as np
def element_wise_add(x, y):
# LoDTensor must be actively converted to numpy array, otherwise,
# numpy.shape can't be used.
x = np.array(x)
y = np.array(y)
if x.shape != y.shape:
raise AssertionError("the shape of inputs must be the same!")
result = np.zeros(x.shape, dtype='int32')
for i in range(len(x)):
for j in range(len(x[0])):
result[i][j] = x[i][j] + y[i][j]
return result
def create_tmp_var(name, dtype, shape):
return fluid.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def py_func_demo():
start_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
# Input of the forward function
x = fluid.data(name='x', shape=[2,3], dtype='int32')
y = fluid.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Variable should be passed in the form of tuple(Variale) or list[Variale]
fluid.layers.py_func(func=element_wise_add, x=[x,y], out=output)
exe=fluid.Executor(fluid.CPUPlace())
exe.run(start_program)
# Feed numpy array to main_program
input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
out = exe.run(main_program,
feed={'x':input1, 'y':input2},
fetch_list=[output.name])
print("{0} + {1} = {2}".format(input1, input2, out))
py_func_demo()
# Reference output:
# [[5, 9, 9] + [[7, 8, 4] = [array([[12, 17, 13]
# [7, 5, 2]] [1, 3, 3]] [8, 8, 5]], dtype=int32)]
"""
helper = LayerHelper('py_func', **locals())
check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
if x is None:
x = []
elif isinstance(x, Variable):
x = [x]
elif isinstance(x, tuple):
x = list(x)
elif not isinstance(x, (list, tuple, Variable)):
raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
if out is None:
out_list = []
elif isinstance(out, Variable):
out_list = [out]
elif isinstance(out, tuple):
out_list = list(out)
elif isinstance(out, list):
out_list = out
else:
raise TypeError(
'Output must be Variable/list(Variable)/tuple(Variable)')
fwd_func_id = PyFuncRegistry(func).id
bwd_func_id = PyFuncRegistry(
backward_func).id if backward_func is not None else -1
for each_out in out_list:
if len(each_out.shape) == 0:
raise ValueError(
'Output shapes of py_func op should be provided by users manually'
)
backward_skip_vars = set()
if backward_func is not None and skip_vars_in_backward_input is not None:
if isinstance(skip_vars_in_backward_input, Variable):
skip_vars_in_backward_input = [skip_vars_in_backward_input]
fwd_in_out = [v.name for v in x]
fwd_in_out.extend([v.name for v in out_list])
fwd_in_out = set(fwd_in_out)
backward_skip_vars = set()
for v in skip_vars_in_backward_input:
if not v.name in fwd_in_out:
raise ValueError(
'Variable {} is not found in forward inputs and outputs'
.format(v.name))
backward_skip_vars.add(v.name)
helper.append_op(
type='py_func',
inputs={'X': x},
outputs={'Out': out_list},
attrs={
'forward_callable_id': fwd_func_id,
'backward_callable_id': bwd_func_id,
'backward_skip_vars': list(backward_skip_vars)
})
return out
# For debug usage
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num
@templatedoc()
def psroi_pool(input,
rois,
output_channels,
spatial_scale,
pooled_height,
pooled_width,
name=None):
"""
${comment}
Parameters:
input (Variable): ${x_comment}
rois (Variable): LoDTensor, ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates. The data type is the same as `input`
output_channels (int): ${output_channels_comment}
spatial_scale (float): ${spatial_scale_comment} Default: 1.0
pooled_height (int): ${pooled_height_comment} Default: 1
pooled_width (int): ${pooled_width_comment} Default: 1
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_comment}.
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
"""
helper = LayerHelper('psroi_pool', **locals())
# check attrs
if not isinstance(output_channels, int):
raise TypeError("output_channels must be int type")
if not isinstance(spatial_scale, float):
raise TypeError("spatial_scale must be float type")
if not isinstance(pooled_height, int):
raise TypeError("pooled_height must be int type")
if not isinstance(pooled_width, int):
raise TypeError("pooled_width must be int type")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='psroi_pool',
inputs={'X': input,
'ROIs': rois},
outputs={'Out': out},
attrs={
'output_channels': output_channels,
'spatial_scale': spatial_scale,
'pooled_height': pooled_height,
'pooled_width': pooled_width
})
return out
@templatedoc()
def prroi_pool(input,
rois,
spatial_scale=1.0,
pooled_height=1,
pooled_width=1,
batch_roi_nums=None,
name=None):
"""
The precise roi pooling implementation for paddle. Reference: https://arxiv.org/pdf/1807.11590.pdf
Args:
input (Variable): The input of precise roi pooling. The shape of the input tensor is
[N, C, H, W], where N is batch size, C is the number of input channels, H
is the height of the feature, and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor or Tensor of shape (num_rois, 4), the lod level
is 1 when it is LoDTensor. The LoD includes the rois's batch index
information. If rois is a Tensor, its batch index information should
be provided by batch_roi_nums.
Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width).
Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
pooled_height (integer): The pooled output height. Default: 1.
pooled_width (integer): The pooled output width. Default: 1.
batch_roi_nums (Variable): The number of rois for each image in the batch. It
should be a 1-D Tensor, with shape [N] and dtype int64,
where N is the batch size. Default: None. Note that the lod of the input should be
empty when batch_roi_nums has values.
name (str, default None): The name of this operation.
Returns:
Variable(Tensor): The shape of the returned Tensor is (N, C, pooled_height, pooled_width), with value type float32 or float16. N, C denote batch_size and channels of input respectively.
Examples:
.. code-block:: python
## prroi_pool without batch_roi_num
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)
## prroi_pool with batch_roi_num
batchsize=4
x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)
"""
helper = LayerHelper('prroi_pool', **locals())
# check attrs
if not isinstance(spatial_scale, float):
raise TypeError("spatial_scale must be float type")
if not isinstance(pooled_height, int):
raise TypeError("pooled_height must be int type")
if not isinstance(pooled_width, int):
raise TypeError("pooled_width must be int type")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
inputs_op = {'X': input, 'ROIs': rois}
if batch_roi_nums is not None:
inputs_op['BatchRoINums'] = batch_roi_nums
helper.append_op(
type='prroi_pool',
inputs=inputs_op,
outputs={'Out': out},
attrs={
'spatial_scale': spatial_scale,
'pooled_height': pooled_height,
'pooled_width': pooled_width
})
return out
def pixel_shuffle(x, upscale_factor):
"""
This op rearranges elements in a tensor of shape [N, C, H, W]
to a tensor of shape [N, C/r**2, H*r, W*r].
This is useful for implementing efficient sub-pixel convolution
with a stride of 1/r.
Please refer to the paper: `Real-Time Single Image and Video Super-Resolution
Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158v2>`_ .
by Shi et al. (2016) for more details.
Parameters:
x(Variable): 4-D tensor, the data type should be float32 or float64.
upscale_factor(int): factor to increase spatial resolution.
Returns:
Out(Variable): Reshaped tensor according to the new dimension.
Raises:
ValueError: If the square of upscale_factor cannot divide the channels of input.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[2,9,4,4])
output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,9,4,4).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
# print(output.shape)
# (2L, 1L, 12L, 12L)
"""
helper = LayerHelper("pixel_shuffle", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(upscale_factor, int):
raise TypeError("upscale factor must be int type")
helper.append_op(
type="pixel_shuffle",
inputs={"X": x},
outputs={"Out": out},
attrs={"upscale_factor": upscale_factor})
return out
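# Illustrative sketch (assumption): pixel shuffle as a NumPy reshape/transpose,
# turning [N, C, H, W] into [N, C/r**2, H*r, W*r] as described above.
def _pixel_shuffle_reference(x_np, r):
    n, c, h, w = x_np.shape
    x_np = x_np.reshape(n, c // (r * r), r, r, h, w)
    x_np = x_np.transpose(0, 1, 4, 2, 5, 3)
    return x_np.reshape(n, c // (r * r), h * r, w * r)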
def fsp_matrix(x, y):
"""
**FSP matrix op**
This op is used to calculate the flow of solution procedure (FSP) matrix of two 4-D Tensor feature maps.
Given feature map x with shape [x_channel, h, w] and feature map y with shape
[y_channel, h, w], we can get the fsp matrix of x and y in two steps:
1. reshape x into matrix with shape [x_channel, h * w] and reshape and
transpose y into matrix with shape [h * w, y_channel].
2. multiply x and y to get fsp matrix with shape [x_channel, y_channel].
The output is a batch of fsp matrices.
Args:
x (Variable): A 4-D Tensor feature map with shape [batch_size, x_channel, height, width].
A Tensor with type float32, float64.
y (Variable): A 4-D Tensor feature map with shape [batch_size, y_channel, height, width].
The y_channel can be different with the x_channel of Input(X)
while the other dimensions must be the same with Input(X)'s. A Tensor with
type float32, float64.
Returns:
fsp matrix (Variable): The output of FSP op with shape [batch_size, x_channel, y_channel].
The x_channel is the channel of x and the y_channel is the channel of y. A Tensor with
type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32])
feature_map_0 = fluid.layers.conv2d(data, num_filters=2,
filter_size=3)
feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2,
filter_size=1)
loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
helper = LayerHelper('fsp_matrix', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype(
input_param_name='x'))
helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
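# Illustrative sketch (assumption): the FSP matrix computed with NumPy following the
# two steps described above; the normalization by h * w follows the FSP paper and is
# an assumption about the exact behaviour of the op.
def _fsp_matrix_reference(x_np, y_np):
    # x_np: [N, Cx, H, W], y_np: [N, Cy, H, W]
    n, cx, h, w = x_np.shape
    cy = y_np.shape[1]
    x_flat = x_np.reshape(n, cx, h * w)
    y_flat = y_np.reshape(n, cy, h * w)
    return np.matmul(x_flat, y_flat.transpose(0, 2, 1)) / float(h * w)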
def continuous_value_model(input, cvm, use_cvm=True):
"""
**continuous_value_model layers**
Now, this OP is used in the CTR project to remove or process the show and click values in :attr:`input`.
:attr:`input` is an embedding vector including show and click value, whose shape is :math:`[N, D]` (N is batch size. D is `2 + embedding dim` ).
Show and click occupy the first two dims of the embedding vector D.
If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and :math:`log(click)` , and output shape is :math:`[N, D]` .
If :attr:`use_cvm` is False, it will remove show and click from :attr:`input` , and output shape is :math:`[N, D - 2]` .
:attr:`cvm` is show_click info, whose shape is :math:`[N, 2]` .
Args:
input (Variable): The input variable. A 2-D LoDTensor with shape :math:`[N, D]` , where N is the batch size, D is `2 + the embedding dim` . `lod level = 1` .
A Tensor with type float32, float64.
cvm (Variable): Show and click variable. A 2-D Tensor with shape :math:`[N, 2]` , where N is the batch size, 2 is show and click.
A Tensor with type float32, float64.
use_cvm (bool): Use show_click or not. If used, the output dim is the same as the input's;
if not used, the output dim is `input dim - 2` (show and click are removed).
Returns:
Variable: A 2-D LodTensor with shape :math:`[N, M]` . if :attr:`use_cvm` = True, M is equal to input dim D. if False, M is equal to `D - 2`. \
A Tensor with same type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[64, 1], dtype="int64")
label = fluid.data(name="label", shape=[64, 1], dtype="int64")
embed = fluid.layers.embedding(
input=input,
size=[100, 11],
dtype='float32')
ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
show_clk.stop_gradient = True
input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)
"""
helper = LayerHelper('cvm', **locals())
out = helper.create_variable(dtype=input.dtype)
helper.append_op(
type='cvm',
inputs={'X': [input],
'CVM': [cvm]},
outputs={'Y': [out]},
attrs={"use_cvm": use_cvm})
return out
def where(condition):
"""
Return an int64 tensor with rank 2, specifying the coordinates of the true elements in `condition`.
Args:
condition(Variable): A bool tensor with rank at least 1, the data type is bool.
Returns:
Variable, the output data type is int64. : The tensor variable storing a 2-D tensor, which contains all the coordinates.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# condition is a tensor [True, False, True]
condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[0], [2]]
# condition is a tensor [[True, False], [False, True]]
condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[0, 0], [1, 1]]
# condition is a tensor [False, False, False]
condition = layers.assign(np.array([0, 0, 0], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[]]
"""
check_variable_and_dtype(condition, "condition", ['bool'], "where")
helper = LayerHelper("where_index", **locals())
out = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
helper.append_op(
type='where_index',
inputs={'Condition': condition},
outputs={'Out': [out]})
return out
def sign(x):
"""
This OP returns the sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
Args:
x(Variable|numpy.ndarray): The input variable could be N-D tensor or N-D numpy array, \
the input data type is float32 or float64.
Returns:
Variable, the output data type is the same as input data type. : The output sign tensor with identical shape to input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# [1.0, 0.0, -1.0]
data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
"""
helper = LayerHelper("sign", **locals())
check_type(x, 'x', (Variable, np.ndarray), 'sign')
if isinstance(x, np.ndarray):
x = assign(x)
check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
return out
def unique(x, dtype='int32'):
"""
**unique**
Return a unique tensor for `x` and an index tensor pointing to this unique tensor.
Args:
x(Variable): A 1-D input tensor.
dtype(np.dtype|core.VarDesc.VarType|str): The type of index tensor: int32, int64.
Returns:
tuple: (out, index). `out` is the unique tensor for `x`, with identical dtype to `x`, and \
`index` is an index tensor pointing to `out`, by which user can recover the original `x` tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
"""
check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
"unique")
helper = LayerHelper("unique", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
index = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='unique',
inputs={'X': x},
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
outputs={'Out': [out],
'Index': [index]})
return out, index
def unique_with_counts(x, dtype='int32'):
"""
This OP returns a unique tensor for `x` , a count tensor giving the count of each unique element in the raw input, \
and an index tensor pointing to this unique tensor.
**NOTICE**: This op supports the variable type of Tensor only.
Args:
x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Default value is int32.
Returns:
tuple, the variable type in tuple is Tensor, the output :attr:`out` data type is the same as input :attr:`x`, \
and data type of output :attr:`index` and :attr:`count` will be int32 or int64.: The :attr:`out` is unique tensor for input :attr:`x`,\
the data shape is :math:`[K]`, the `K` may be different to the `N` in shape of :attr:`x`. :attr:`index` is an index tensor pointing\
to :attr:`out`, the data shape is :math:`[N]` , the data shape is the same as input :attr:`x`. :attr:`count` is count of unique element in\
the :attr:`x`, the data shape is :math:`[K]`, the data shape is the same as output :attr:`out`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
# count is [1, 3, 1, 1]
# x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,)
"""
check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
"unique_with_counts")
if not (dtype == 'int32' or dtype == 'int64'):
raise TypeError(
"Op unique_with_counts, index dtype must be int32 or int64")
if x is None or len(x.shape) != 1:
raise ValueError(
"Op unique_with_counts, x must not be null and size of dim must be 1"
)
helper = LayerHelper("unique_with_counts", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
index = helper.create_variable_for_type_inference(dtype)
count = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='unique_with_counts',
inputs={'X': x},
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
outputs={'Out': [out],
'Index': [index],
'Count': [count]})
return out, index, count
def deformable_conv(input,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
deformable_groups=None,
im2col_step=None,
param_attr=None,
bias_attr=None,
modulated=True,
name=None):
"""
**Deformable Convolution op**
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
Offset shape: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})`
Mask shape: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Variable): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
mask (Variable, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filters. It is the same as the output
image channel.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int): The number of deformable group partitions.
Default: deformable_groups = 1.
im2col_step (int): Maximum number of images per im2col computation;
The total batch size should be divisible by this value or smaller
than this value; if you face an out of memory problem, you can try
to use a smaller value here.
Default: im2col_step = 64.
param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as param_attr.
If the Initializer of the param_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \
used while True. Default: True.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The tensor variable storing the deformable convolution \
result. A Tensor with type float32, float64.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'deformable_conv')
check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
'deformable_conv')
check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
num_channels = input.shape[1]
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
if not isinstance(input, Variable):
raise TypeError("Input of deformable_conv must be Variable")
if not isinstance(offset, Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
if modulated:
helper.append_op(
type='deformable_conv',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
'Mask': mask,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
else:
helper.append_op(
type='deformable_conv_v1',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
return output
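# Illustrative helper (assumption, only restating the shape formula above): output
# spatial size of the deformable convolution along one dimension.
def _deformable_conv_out_size(in_size, padding, dilation, kernel, stride):
    return (in_size + 2 * padding - (dilation * (kernel - 1) + 1)) // stride + 1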
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
"""
This op returns a col buffer of sliding local blocks of input x, also known
as im2col for batched 2D image tensors. For each block under the convolution filter,
all elements will be rearranged as a column. While the convolution filter slides over
the input feature map, a series of such columns will be formed.
For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
can be calculated as following.
.. math::
dkernel[0] &= dilations[0] \\times (kernel\_sizes[0] - 1) + 1
dkernel[1] &= dilations[1] \\times (kernel\_sizes[1] - 1) + 1
hout &= \\frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1
wout &= \\frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1
Cout &= C \\times kernel\_sizes[0] \\times kernel\_sizes[1]
Lout &= hout \\times wout
Parameters:
x(Variable): 4-D Tensor, input tensor of format [N, C, H, W],
data type can be float32 or float64
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
or an integer k treated as [k, k].
strides(int|list): The strides, should be [stride_h, stride_w]
or an integer stride treated as [stride, stride].
By default, strides will be [1, 1].
paddings(int|list): The paddings of each dimension, should be
[padding_top, padding_left, padding_bottom, padding_right]
or [padding_h, padding_w] or an integer padding.
If [padding_h, padding_w] was given, it will be expanded to
[padding_h, padding_w, padding_h, padding_w]. If an integer
padding was given, [padding, padding, padding, padding] will
be used. By default, paddings will be [0, 0, 0, 0].
dilations(int|list): the dilations of convolution kernel, should be
[dilation_h, dilation_w], or an integer dilation treated as
[dilation, dilation]. For default, it will be [1, 1].
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The tensor variable corresponding to the sliding local blocks.
The output shape is [N, Cout, Lout] as described above.
Cout is the total number of values within each block,
and Lout is the total number of such blocks.
The data type of output is the same as the input :math:`x`
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name = 'data', shape = [100, 3, 224, 224], dtype = 'float32')
y = fluid.layers.unfold(x, [3, 3], 1, 1, 1)
"""
helper = LayerHelper("unfold", **locals())
assert len(x.shape) == 4, \
"input should be the format of [N, C, H, W]"
if isinstance(kernel_sizes, int):
kernel_sizes = [kernel_sizes, kernel_sizes]
else:
assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
"kernel_sizes should either be an integer or a list of two integers"
if isinstance(strides, int):
strides = [strides, strides]
else:
assert isinstance(strides, list) and (len(strides) == 2), \
"strides should either be an integer or a list of two integers"
if isinstance(dilations, int):
dilations = [dilations, dilations]
else:
assert isinstance(dilations, list) and (len(dilations) == 2), \
"dilations should either be an integer or a list of two integers"
if isinstance(paddings, int):
paddings = [paddings] * 4
elif isinstance(paddings, list):
if len(paddings) == 2:
paddings = paddings * 2
elif len(paddings) == 4:
pass
else:
raise ValueError(
"paddings should either be an integer or a list of 2 or 4 integers"
)
else:
raise ValueError(
"Unexpected type of paddings, it should be either an integer or a list"
"of 2 or 4 integers")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="unfold",
inputs={"X": x},
outputs={"Y": out},
attrs={
"kernel_sizes": kernel_sizes,
"strides": strides,
"paddings": paddings,
"dilations": dilations
})
return out
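# Illustrative helper (assumption): the output shape [N, Cout, Lout] of unfold,
# following the formulas given in the docstring above.
def _unfold_out_shape(c, h, w, kernel_sizes, strides, paddings, dilations):
    dkernel_h = dilations[0] * (kernel_sizes[0] - 1) + 1
    dkernel_w = dilations[1] * (kernel_sizes[1] - 1) + 1
    hout = (h + paddings[0] + paddings[2] - dkernel_h) // strides[0] + 1
    wout = (w + paddings[1] + paddings[3] - dkernel_w) // strides[1] + 1
    cout = c * kernel_sizes[0] * kernel_sizes[1]
    return cout, hout * wout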
def deformable_roi_pooling(input,
rois,
trans,
no_trans=False,
spatial_scale=1.0,
group_size=[1, 1],
pooled_height=1,
pooled_width=1,
part_size=None,
sample_per_part=1,
trans_std=0.1,
position_sensitive=False,
name=None):
"""
Deformable ROI Pooling Layer
Performs deformable region-of-interest pooling on inputs. As described
in `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_, it obtains an offset for each bin after
roi pooling so that pooling happens at the correct region. The batch size will change to the number of region bounding boxes after deformable_roi_pooling.
The operation has three steps:
1. Divide each region proposal into equal-sized sections with the pooled_width and pooled_height.
2. Add an offset to each pixel in the ROI to get the new location, and compute the new value directly through
bilinear interpolation with the four nearest pixels.
3. Sample several points in each bin and average them as the output.
Args:
input (Variable):The input of deformable roi pooling and it is tensor which value type is float32. The shape of input is
[N, C, H, W]. Where N is batch size, C is number of input channels,
H is height of the feature, and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) with type float32 to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), and the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates, which value type is float32.
trans (Variable): Offset of features on ROIs while pooling which value type is float32. The format is [N, C, H, W], where
N is number of ROIs, C is number of channels, which indicate the offset distance
in the x and y directions, H is pooled height, and W is pooled width.
no_trans (bool): Whether to add offset to get new value or not while roi pooling, which value with type bool is True or False.
If value is True, no offset will be added in operation. Default: False.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width), which value type is float32.
Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
group_size (list|tuple): The number of groups which input channels are divided and the input is list or tuple, which value type is int32. (eg.number of input channels
is k1 * k2 * (C + 1), which k1 and k2 are group width and height and C+1 is number of output
channels.) eg.(4, 6), which 4 is height of group and 6 is width of group. Default: [1, 1].
pooled_height (int): The pooled output height which value type is int32. Default: 1.
pooled_width (int): The pooled output width which value type is int32. Default: 1.
part_size (list|tuple): The height and width of offset which values in list or tuple is int32, eg.(4, 6), which height is 4 and width is 6, and values always equal to pooled_height \
and pooled_width. Default: if None, default value is [pooled_height, pooled_width].
sample_per_part (int): The number of samples in each bin which value type is int32. If value is bigger, it will consume more performance. Default: 1.
trans_std (float): Coefficient of offset which value type is float32. It controls weight of offset. Default: 0.1.
position_sensitive (bool): Whether to choose deformable psroi pooling mode or not, and value type is bool(True or False). If value is False, input dimension equals to output dimension. \
If value is True, input dimension should be output dimension * pooled_height * pooled_width. Default: False.
name (str|None): Name of layer. Default: None.
Returns:
Variable: Output of deformable roi pooling is that, if position sensitive is False, input dimension equals to output dimension. If position sensitive is True,\
input dimension should be the result of output dimension divided by pooled height and pooled width.
Examples:
.. code-block:: python
# position_sensitive=True
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=True)
# position_sensitive=False
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=False)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
'deformable_roi_pooling')
check_type(group_size, 'group_size', (list, tuple),
'deformable_roi_pooling')
if part_size is not None:
check_type(part_size, 'part_size', (list, tuple),
'deformable_roi_pooling')
input_channels = input.shape[1]
if position_sensitive == False:
output_channels = input_channels
else:
output_channels = input_channels // (pooled_height * pooled_width)
if part_size is None:
part_height = pooled_height
part_width = pooled_width
part_size = [part_height, part_width]
part_size = utils.convert_to_list(part_size, 2, 'part_size')
group_size = utils.convert_to_list(group_size, 2, 'group_size')
helper = LayerHelper('deformable_psroi_pooling', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
top_count = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="deformable_psroi_pooling",
inputs={"Input": input,
"ROIs": rois,
"Trans": trans},
outputs={"Output": output,
"TopCount": top_count},
attrs={
"no_trans": no_trans,
"spatial_scale": spatial_scale,
"output_dim": output_channels,
"group_size": group_size,
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"part_size": part_size,
"sample_per_part": sample_per_part,
"trans_std": trans_std
})
return output
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
"""
This operator recomputes the `input` indices according to the offset of the
shard. The length of the indices is evenly divided into N shards, and if
the `shard_id` matches the shard with the input index inside, the index is
recomputed on the basis of the shard offset, otherwise it is set to
`ignore_value`. The detail is as follows:
::
shard_size = (index_num + nshards - 1) // nshards
y = x % shard_size if x // shard_size == shard_id else ignore_value
NOTE: If the length of indices cannot be evenly divided by the shard number,
the size of the last shard will be less than the calculated `shard_size`
Examples:
::
Input:
X.shape = [4, 1]
X.data = [[1], [6], [12], [19]]
index_num = 20
nshards = 2
ignore_value = -1
if shard_id == 0, we get:
Out.shape = [4, 1]
Out.data = [[1], [6], [-1], [-1]]
if shard_id == 1, we get:
Out.shape = [4, 1]
Out.data = [[-1], [-1], [2], [9]]
Args:
- **input** (Variable): Input indices, last dimension must be 1.
- **index_num** (scalar): An integer defining the range of the index.
- **nshards** (scalar): The number of shards
- **shard_id** (scalar): The index of the current shard
- **ignore_value** (scalar): An integer value out of sharded index range
Returns:
Variable: The sharded index of input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
batch_size = 32
label = fluid.data(name="label", shape=[batch_size, 1], dtype="int64")
shard_label = fluid.layers.shard_index(input=label,
index_num=20,
nshards=2,
shard_id=0)
"""
op_type = 'shard_index'
helper = LayerHelper(op_type, **locals())
if shard_id < 0 or shard_id >= nshards:
raise ValueError('The shard_id(%d) should be in [0, %d)' %
(shard_id, nshards))
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'X': [input]},
outputs={'Out': out},
attrs={
'index_num': index_num,
'nshards': nshards,
'shard_id': shard_id,
'ignore_value': ignore_value
},
stop_gradient=True)
return out
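# Illustrative sketch (assumption): the per-element rule applied by shard_index,
# matching the pseudo code in the docstring above.
def _shard_index_reference(x, index_num, nshards, shard_id, ignore_value=-1):
    shard_size = (index_num + nshards - 1) // nshards
    return x % shard_size if x // shard_size == shard_id else ignore_value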
@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
"""
This operator implements the hard_swish activation function.
Hard_swish is proposed in MobileNetV3, and performs better in terms of computational stability and efficiency compared to the swish function.
For more details please refer to: https://arxiv.org/pdf/1905.02244.pdf
The formula is as follows:
.. math::
out = \\frac{x * (min(max(0, x+offset), threshold))}{scale}
In the above equation:
``threshold`` and ``scale`` should be positive, ``offset`` can be positive or negative. It is recommended to use default parameters.
Args:
x (Variable): Input feature, multi-dimensional Tensor. The data type should be float32 or float64.
threshold (float, optional): The threshold in Relu function. Default: 6.0
scale (float, optional): The scale factor. Default: 6.0
offset (float, optional): The offset factor. Default: 3.0
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
DATATYPE='float32'
x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
y = fluid.layers.hard_swish(x)
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
print(out) # [[0.66666667, 1.66666667,3., 4.]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hard_swish')
helper = LayerHelper('hard_swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold,
'scale': scale,
'offset': offset})
return out
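# Illustrative sketch (assumption): hard_swish evaluated with NumPy, matching the
# formula out = x * min(max(0, x + offset), threshold) / scale given above.
def _hard_swish_reference(x_np, threshold=6.0, scale=6.0, offset=3.0):
    return x_np * np.clip(x_np + offset, 0.0, threshold) / scale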
def gather_tree(ids, parents):
"""
To be used after beam search. After beam search, we get selected ids at
each time step and the corresponding parents in the search tree. Both ids
and parents have the layout :attr:`[max_time, batch_size, beam_size]`. Then
:attr:`gather_tree` is used to backtrace from the last time step and
generate the full sequences by collecting selected ids.
Here is an example:
.. code-block:: text
Given:
ids = [[[2 2]
[6 1]]
[[3 9]
[6 1]]
[[0 1]
[9 0]]]
parents = [[[0 0]
[1 1]]
[[1 0]
[1 0]]
[[0 0]
[0 1]]]
Then:
gather_tree(ids, parents)
= [[[2 2]
[1 6]]
[[3 3]
[6 1]]
[[0 1]
[9 0]]]
Args:
ids(Variable): A Tensor with shape :attr:`[length, batch_size, beam_size]`
and data type :attr:`int32` or :attr:`int64`. It contains the selected
ids of all time steps.
parents(Variable): A Tensor with the same shape and data type as :attr:`ids`,
It contains the parents corresponding to selected ids when searching
among beams.
Returns:
Variable: A Tensor with the same shape and data type as :attr:`ids`. \
It contains the full sequences. The sequences are collected from \
:attr:`ids` by backtracing according to :attr:`parents`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
ids = fluid.layers.data(name='ids',
shape=[5, 2, 2],
dtype='int64',
append_batch_size=False)
parents = fluid.layers.data(name='parents',
shape=[5, 2, 2],
dtype='int64',
append_batch_size=False)
final_sequences = fluid.layers.gather_tree(ids, parents)
"""
helper = LayerHelper('gather_tree', **locals())
out = helper.create_variable_for_type_inference(dtype=ids.dtype)
helper.append_op(
type="gather_tree",
inputs={"Ids": ids,
"Parents": parents},
outputs={"Out": out})
return out
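# Illustrative sketch (assumption): a NumPy backtrace equivalent to gather_tree,
# reproducing the worked example in the docstring above.
def _gather_tree_reference(ids_np, parents_np):
    # ids_np / parents_np: [max_time, batch_size, beam_size]
    max_time, batch_size, beam_size = ids_np.shape
    out = np.zeros_like(ids_np)
    for b in range(batch_size):
        for k in range(beam_size):
            parent = k
            # walk backwards from the last time step, following parent pointers
            for t in reversed(range(max_time)):
                out[t, b, k] = ids_np[t, b, parent]
                parent = parents_np[t, b, parent]
    return out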
@templatedoc()
def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
"""
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max).
Examples:
::
Input:
shape = [1, 2]
Output:
result=[[0.8505902, 0.8397286]]
Args:
shape (list|tuple|Variable): The shape of the output Tensor, if the shape is a list or tuple,
its elements can be an integer
or a Tensor with the shape [1], and the type of the Tensor must be int32 or int64.
If the shape is a Variable, it is a 1-D Tensor, and the type of the Tensor must be int32 or int64.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The type of the output Tensor. Supported data types: float32, float64.
Default: float32.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a
seed generated by the system. Note that if seed is not 0, this
operator will always generate the same random numbers every time.
Default 0.
Returns:
Variable: A Tensor of the specified shape filled with uniform_random values.
Raises:
TypeError: The shape type should be list or tuple or variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
# example 2:
# attr shape is a list which contains tensor Variable.
dim_1 = fluid.layers.fill_constant([1],"int64",3)
dim_2 = fluid.layers.fill_constant([1],"int32",5)
result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])
# example 3:
# attr shape is a Variable, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = fluid.layers.uniform_random(var_shape)
var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32")
result_4 = fluid.layers.uniform_random(var_shape_int32)
"""
check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random')
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'uniform_random')
def get_new_shape_tensor(list_shape):
new_shape_tensor = []
for dim in list_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_shape_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int64')
fill_constant([1], 'int64', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
                assert dim_size > 0, (
                    "Each non-Variable dimension size given in shape must be "
                    "positive.")
return attrs_shape
helper = LayerHelper("uniform_random", **locals())
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max}
if in_dygraph_mode():
attrs['shape'] = shape
else:
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["ShapeTensor"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of argument(shape) can't be zero.")
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensorList'] = get_new_shape_tensor(shape)
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="uniform_random", inputs=inputs, attrs=attrs,
outputs={"Out": out})
return helper.append_activation(out)
|
py | 1a387ba6e8ae9d640560714da09ab220e04dd996 | from dotenv import load_dotenv
load_dotenv()
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import tkinter as tk
import webbrowser
def open_spotify(url):
webbrowser.open(url, new = 2)
def create_label(text):
return tk.Label(master = frm_recommendations, text = text)
def create_button(text, url):
return tk.Button(master = frm_recommendations, text = text, command = lambda:open_spotify(url))
def clear(*args):
    for widget in args:
        widget.destroy()
def display_recommendations(response):
lbl_track_name = tk.Label(master = frm_recommendations, text = 'Track Name')
lbl_artist_name = tk.Label(master = frm_recommendations, text = 'Artist Name')
lbl_play_it = tk.Label(master = frm_recommendations, text = 'Play It')
lbl_track_name.grid(row = 0,column = 0)
lbl_artist_name.grid(row = 0,column = 1)
lbl_play_it.grid(row = 0,column = 2)
for idx, track in enumerate(response['tracks']):
lbl_track_name_recommended = create_label(track['name'])
lbl_track_name_recommended.grid(row = idx + 1, column = 0)
lbl_artist_name_recommended = create_label(track['artists'][0]['name'])
lbl_artist_name_recommended.grid(row = idx + 1, column = 1)
        btn_play_it_recommended = create_button('Play It', track['external_urls']['spotify'])
        btn_play_it_recommended.grid(row = idx + 1, column = 2, padx = 10)
def get_recommendations():
search = ent_search.get()
sp = spotipy.Spotify(client_credentials_manager = SpotifyClientCredentials ("#### enter your spotify client id ####","#### Get your secret client id here https://developer.spotify.com/dashboard/applications ####"))
result = sp.search(q = search, limit =1)
id_list = [result['tracks']['items'][0]['id']]
recommendations = sp.recommendations(seed_tracks = id_list, limit = 10)
display_recommendations(recommendations)
window = tk.Tk()
frm_search_field = tk.Frame(master = window, width = 100)
frm_recommendations = tk.Frame(master = window)
frm_search_field.pack()
frm_recommendations.pack()
ent_search = tk.Entry(master= frm_search_field, width =25)
btn_get_recommendations = tk.Button(master = frm_search_field, text = 'Get recommendations', command= get_recommendations)
ent_search.grid(row = 0,column = 0,pady = 10,padx = 10)
btn_get_recommendations.grid(row = 0,column = 1,pady = 10,padx = 10)
window.mainloop()
|
py | 1a387c2dc15313e4b868d3f8f39b06eab4e8f7e2 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from collections import namedtuple, deque
import concurrent.futures
import logging
import struct
import sys
import time
import threading
import traceback
import zlib
import aiohttp
from . import utils
from .activity import BaseActivity
from .enums import SpeakingState
from .errors import ConnectionClosed, InvalidArgument
_log = logging.getLogger(__name__)
__all__ = (
"DiscordWebSocket",
"KeepAliveHandler",
"VoiceKeepAliveHandler",
"DiscordVoiceWebSocket",
"ReconnectWebSocket",
)
class ReconnectWebSocket(Exception):
"""Signals to safely reconnect the websocket."""
def __init__(self, shard_id, *, resume=True):
self.shard_id = shard_id
self.resume = resume
self.op = "RESUME" if resume else "IDENTIFY"
class WebSocketClosure(Exception):
"""An exception to make up for the fact that aiohttp doesn't signal closure."""
pass
EventListener = namedtuple("EventListener", "predicate event result future")
class GatewayRatelimiter:
def __init__(self, count=110, per=60.0):
# The default is 110 to give room for at least 10 heartbeats per minute
self.max = count
self.remaining = count
self.window = 0.0
self.per = per
self.lock = asyncio.Lock()
self.shard_id = None
def is_ratelimited(self):
current = time.time()
if current > self.window + self.per:
return False
return self.remaining == 0
def get_delay(self):
current = time.time()
if current > self.window + self.per:
self.remaining = self.max
if self.remaining == self.max:
self.window = current
if self.remaining == 0:
return self.per - (current - self.window)
self.remaining -= 1
if self.remaining == 0:
self.window = current
return 0.0
async def block(self):
async with self.lock:
delta = self.get_delay()
if delta:
_log.warning(
"WebSocket in shard ID %s is ratelimited, waiting %.2f seconds",
self.shard_id,
delta,
)
await asyncio.sleep(delta)
class KeepAliveHandler(threading.Thread):
def __init__(self, *args, **kwargs):
ws = kwargs.pop("ws", None)
interval = kwargs.pop("interval", None)
shard_id = kwargs.pop("shard_id", None)
threading.Thread.__init__(self, *args, **kwargs)
self.ws = ws
self._main_thread_id = ws.thread_id
self.interval = interval
self.daemon = True
self.shard_id = shard_id
self.msg = "Keeping shard ID %s websocket alive with sequence %s."
self.block_msg = "Shard ID %s heartbeat blocked for more than %s seconds."
self.behind_msg = "Can't keep up, shard ID %s websocket is %.1fs behind."
self._stop_ev = threading.Event()
self._last_ack = time.perf_counter()
self._last_send = time.perf_counter()
self._last_recv = time.perf_counter()
self.latency = float("inf")
self.heartbeat_timeout = ws._max_heartbeat_timeout
def run(self):
while not self._stop_ev.wait(self.interval):
if self._last_recv + self.heartbeat_timeout < time.perf_counter():
_log.warning(
"Shard ID %s has stopped responding to the gateway. Closing and restarting.",
self.shard_id,
)
coro = self.ws.close(4000)
f = asyncio.run_coroutine_threadsafe(coro, loop=self.ws.loop)
try:
f.result()
except Exception:
_log.exception(
"An error occurred while stopping the gateway. Ignoring."
)
finally:
self.stop()
return
data = self.get_payload()
_log.debug(self.msg, self.shard_id, data["d"])
coro = self.ws.send_heartbeat(data)
f = asyncio.run_coroutine_threadsafe(coro, loop=self.ws.loop)
try:
# block until sending is complete
total = 0
while True:
try:
f.result(10)
break
except concurrent.futures.TimeoutError:
total += 10
try:
frame = sys._current_frames()[self._main_thread_id]
except KeyError:
msg = self.block_msg
else:
stack = "".join(traceback.format_stack(frame))
msg = f"{self.block_msg}\nLoop thread traceback (most recent call last):\n{stack}"
_log.warning(msg, self.shard_id, total)
except Exception:
self.stop()
else:
self._last_send = time.perf_counter()
def get_payload(self):
return {"op": self.ws.HEARTBEAT, "d": self.ws.sequence}
def stop(self):
self._stop_ev.set()
def tick(self):
self._last_recv = time.perf_counter()
def ack(self):
ack_time = time.perf_counter()
self._last_ack = ack_time
self.latency = ack_time - self._last_send
if self.latency > 10:
_log.warning(self.behind_msg, self.shard_id, self.latency)
class VoiceKeepAliveHandler(KeepAliveHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.recent_ack_latencies = deque(maxlen=20)
self.msg = "Keeping shard ID %s voice websocket alive with timestamp %s."
self.block_msg = "Shard ID %s voice heartbeat blocked for more than %s seconds"
self.behind_msg = "High socket latency, shard ID %s heartbeat is %.1fs behind"
def get_payload(self):
return {"op": self.ws.HEARTBEAT, "d": int(time.time() * 1000)}
def ack(self):
ack_time = time.perf_counter()
self._last_ack = ack_time
self._last_recv = ack_time
self.latency = ack_time - self._last_send
self.recent_ack_latencies.append(self.latency)
class DiscordClientWebSocketResponse(aiohttp.ClientWebSocketResponse):
async def close(self, *, code: int = 4000, message: bytes = b"") -> bool:
return await super().close(code=code, message=message)
class DiscordWebSocket:
"""Implements a WebSocket for Discord's gateway v6.
Attributes
-----------
DISPATCH
Receive only. Denotes an event to be sent to Discord, such as READY.
HEARTBEAT
When received tells Discord to keep the connection alive.
When sent asks if your connection is currently alive.
IDENTIFY
Send only. Starts a new session.
PRESENCE
Send only. Updates your presence.
VOICE_STATE
Send only. Starts a new connection to a voice guild.
VOICE_PING
Send only. Checks ping time to a voice guild, do not use.
RESUME
Send only. Resumes an existing connection.
RECONNECT
Receive only. Tells the client to reconnect to a new gateway.
REQUEST_MEMBERS
Send only. Asks for the full member list of a guild.
INVALIDATE_SESSION
Receive only. Tells the client to optionally invalidate the session
and IDENTIFY again.
HELLO
Receive only. Tells the client the heartbeat interval.
HEARTBEAT_ACK
Receive only. Confirms receiving of a heartbeat. Not having it implies
a connection issue.
GUILD_SYNC
Send only. Requests a guild sync.
gateway
The gateway we are currently connected to.
token
The authentication token for discord.
"""
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
PRESENCE = 3
VOICE_STATE = 4
VOICE_PING = 5
RESUME = 6
RECONNECT = 7
REQUEST_MEMBERS = 8
INVALIDATE_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
GUILD_SYNC = 12
def __init__(self, socket, *, loop):
self.socket = socket
self.loop = loop
# an empty dispatcher to prevent crashes
self._dispatch = lambda *args: None
# generic event listeners
self._dispatch_listeners = []
# the keep alive
self._keep_alive = None
self.thread_id = threading.get_ident()
# ws related stuff
self.session_id = None
self.sequence = None
self._zlib = zlib.decompressobj()
self._buffer = bytearray()
self._close_code = None
self._rate_limiter = GatewayRatelimiter()
@property
def open(self):
return not self.socket.closed
def is_ratelimited(self):
return self._rate_limiter.is_ratelimited()
def debug_log_receive(self, data, /):
self._dispatch("socket_raw_receive", data)
def log_receive(self, _, /):
pass
@classmethod
async def from_client(
cls,
client,
*,
initial=False,
gateway=None,
shard_id=None,
session=None,
sequence=None,
resume=False,
):
"""Creates a main websocket for Discord from a :class:`Client`.
This is for internal use only.
"""
gateway = gateway or await client.http.get_gateway()
socket = await client.http.ws_connect(gateway)
ws = cls(socket, loop=client.loop)
# dynamically add attributes needed
ws.token = client.http.token
ws._connection = client._connection
ws._discord_parsers = client._connection.parsers
ws._dispatch = client.dispatch
ws.gateway = gateway
ws.call_hooks = client._connection.call_hooks
ws._initial_identify = initial
ws.shard_id = shard_id
ws._rate_limiter.shard_id = shard_id
ws.shard_count = client._connection.shard_count
ws.session_id = session
ws.sequence = sequence
ws._max_heartbeat_timeout = client._connection.heartbeat_timeout
if client._enable_debug_events:
ws.send = ws.debug_send
ws.log_receive = ws.debug_log_receive
client._connection._update_references(ws)
_log.debug("Created websocket connected to %s", gateway)
# poll event for OP Hello
await ws.poll_event()
if not resume:
await ws.identify()
return ws
await ws.resume()
return ws
def wait_for(self, event, predicate, result=None):
"""Waits for a DISPATCH'd event that meets the predicate.
Parameters
-----------
event: :class:`str`
The event name in all upper case to wait for.
predicate
A function that takes a data parameter to check for event
properties. The data parameter is the 'd' key in the JSON message.
result
A function that takes the same data parameter and executes to send
the result to the future. If ``None``, returns the data.
Returns
--------
asyncio.Future
A future to wait for.
"""
future = self.loop.create_future()
entry = EventListener(
event=event, predicate=predicate, result=result, future=future
)
self._dispatch_listeners.append(entry)
return future
async def identify(self):
"""Sends the IDENTIFY packet."""
payload = {
"op": self.IDENTIFY,
"d": {
"token": self.token,
"properties": {
"$os": sys.platform,
"$browser": "discord.py",
"$device": "discord.py",
"$referrer": "",
"$referring_domain": "",
},
"compress": True,
"large_threshold": 250,
"v": 3,
},
}
if self.shard_id is not None and self.shard_count is not None:
payload["d"]["shard"] = [self.shard_id, self.shard_count]
state = self._connection
if state._activity is not None or state._status is not None:
payload["d"]["presence"] = {
"status": state._status,
"game": state._activity,
"since": 0,
"afk": False,
}
if state._intents is not None:
payload["d"]["intents"] = state._intents.value
await self.call_hooks(
"before_identify", self.shard_id, initial=self._initial_identify
)
await self.send_as_json(payload)
_log.info("Shard ID %s has sent the IDENTIFY payload.", self.shard_id)
async def resume(self):
"""Sends the RESUME packet."""
payload = {
"op": self.RESUME,
"d": {
"seq": self.sequence,
"session_id": self.session_id,
"token": self.token,
},
}
await self.send_as_json(payload)
_log.info("Shard ID %s has sent the RESUME payload.", self.shard_id)
async def received_message(self, msg, /):
if type(msg) is bytes:
self._buffer.extend(msg)
if len(msg) < 4 or msg[-4:] != b"\x00\x00\xff\xff":
return
msg = self._zlib.decompress(self._buffer)
msg = msg.decode("utf-8")
self._buffer = bytearray()
self.log_receive(msg)
msg = utils._from_json(msg)
self._dispatch("socket_response")
_log.debug("For Shard ID %s: WebSocket Event: %s", self.shard_id, msg)
event = msg.get("t")
if event:
self._dispatch("socket_event_type", event)
op = msg.get("op")
data = msg.get("d")
seq = msg.get("s")
if seq is not None:
self.sequence = seq
if self._keep_alive:
self._keep_alive.tick()
if op != self.DISPATCH:
if op == self.RECONNECT:
# "reconnect" can only be handled by the Client
# so we terminate our connection and raise an
# internal exception signalling to reconnect.
_log.debug("Received RECONNECT opcode.")
await self.close()
raise ReconnectWebSocket(self.shard_id)
if op == self.HEARTBEAT_ACK:
if self._keep_alive:
self._keep_alive.ack()
return
if op == self.HEARTBEAT:
if self._keep_alive:
beat = self._keep_alive.get_payload()
await self.send_as_json(beat)
return
if op == self.HELLO:
interval = data["heartbeat_interval"] / 1000.0
self._keep_alive = KeepAliveHandler(
ws=self, interval=interval, shard_id=self.shard_id
)
# send a heartbeat immediately
await self.send_as_json(self._keep_alive.get_payload())
self._keep_alive.start()
return
if op == self.INVALIDATE_SESSION:
if data is True:
await self.close()
raise ReconnectWebSocket(self.shard_id)
self.sequence = None
self.session_id = None
_log.info("Shard ID %s session has been invalidated.", self.shard_id)
await self.close(code=1000)
raise ReconnectWebSocket(self.shard_id, resume=False)
_log.warning("Unknown OP code %s.", op)
return
if event == "READY":
self._trace = trace = data.get("_trace", [])
self.sequence = msg["s"]
self.session_id = data["session_id"]
# pass back shard ID to ready handler
data["__shard_id__"] = self.shard_id
_log.info(
"Shard ID %s has connected to Gateway: %s (Session ID: %s).",
self.shard_id,
", ".join(trace),
self.session_id,
)
elif event == "RESUMED":
self._trace = trace = data.get("_trace", [])
# pass back the shard ID to the resumed handler
data["__shard_id__"] = self.shard_id
_log.info(
"Shard ID %s has successfully RESUMED session %s under trace %s.",
self.shard_id,
self.session_id,
", ".join(trace),
)
try:
func = self._discord_parsers[event]
except KeyError:
_log.debug("Unknown event %s.", event)
else:
func(data)
# remove the dispatched listeners
removed = []
for index, entry in enumerate(self._dispatch_listeners):
if entry.event != event:
continue
future = entry.future
if future.cancelled():
removed.append(index)
continue
try:
valid = entry.predicate(data)
except Exception as exc:
future.set_exception(exc)
removed.append(index)
else:
if valid:
ret = data if entry.result is None else entry.result(data)
future.set_result(ret)
removed.append(index)
for index in reversed(removed):
del self._dispatch_listeners[index]
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds."""
heartbeat = self._keep_alive
return float("inf") if heartbeat is None else heartbeat.latency
def _can_handle_close(self):
code = self._close_code or self.socket.close_code
return code not in (1000, 4004, 4010, 4011, 4012, 4013, 4014)
async def poll_event(self):
"""Polls for a DISPATCH event and handles the general gateway loop.
Raises
------
ConnectionClosed
The websocket connection was terminated for unhandled reasons.
"""
try:
msg = await self.socket.receive(timeout=self._max_heartbeat_timeout)
if msg.type is aiohttp.WSMsgType.TEXT:
await self.received_message(msg.data)
elif msg.type is aiohttp.WSMsgType.BINARY:
await self.received_message(msg.data)
elif msg.type is aiohttp.WSMsgType.ERROR:
_log.debug("Received %s", msg)
raise msg.data
elif msg.type in (
aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.CLOSING,
aiohttp.WSMsgType.CLOSE,
):
_log.debug("Received %s", msg)
raise WebSocketClosure
except (asyncio.TimeoutError, WebSocketClosure) as e:
# Ensure the keep alive handler is closed
if self._keep_alive:
self._keep_alive.stop()
self._keep_alive = None
if isinstance(e, asyncio.TimeoutError):
_log.info("Timed out receiving packet. Attempting a reconnect.")
raise ReconnectWebSocket(self.shard_id) from None
code = self._close_code or self.socket.close_code
if self._can_handle_close():
_log.info("Websocket closed with %s, attempting a reconnect.", code)
raise ReconnectWebSocket(self.shard_id) from None
else:
_log.info("Websocket closed with %s, cannot reconnect.", code)
raise ConnectionClosed(
self.socket, shard_id=self.shard_id, code=code
) from None
async def debug_send(self, data, /):
await self._rate_limiter.block()
self._dispatch("socket_raw_send", data)
await self.socket.send_str(data)
async def send(self, data, /):
await self._rate_limiter.block()
await self.socket.send_str(data)
async def send_as_json(self, data):
try:
await self.send(utils._to_json(data))
except RuntimeError as exc:
if not self._can_handle_close():
raise ConnectionClosed(self.socket, shard_id=self.shard_id) from exc
async def send_heartbeat(self, data):
# This bypasses the rate limit handling code since it has a higher priority
try:
await self.socket.send_str(utils._to_json(data))
except RuntimeError as exc:
if not self._can_handle_close():
raise ConnectionClosed(self.socket, shard_id=self.shard_id) from exc
async def change_presence(self, *, activity=None, status=None, since=0.0):
if activity is not None:
if not isinstance(activity, BaseActivity):
raise InvalidArgument("activity must derive from BaseActivity.")
activity = [activity.to_dict()]
else:
activity = []
if status == "idle":
since = int(time.time() * 1000)
payload = {
"op": self.PRESENCE,
"d": {
"activities": activity,
"afk": False,
"since": since,
"status": status,
},
}
sent = utils._to_json(payload)
_log.debug('Sending "%s" to change status', sent)
await self.send(sent)
async def request_chunks(
self, guild_id, query=None, *, limit, user_ids=None, presences=False, nonce=None
):
payload = {
"op": self.REQUEST_MEMBERS,
"d": {"guild_id": guild_id, "presences": presences, "limit": limit},
}
if nonce:
payload["d"]["nonce"] = nonce
if user_ids:
payload["d"]["user_ids"] = user_ids
if query is not None:
payload["d"]["query"] = query
await self.send_as_json(payload)
async def voice_state(self, guild_id, channel_id, self_mute=False, self_deaf=False):
payload = {
"op": self.VOICE_STATE,
"d": {
"guild_id": guild_id,
"channel_id": channel_id,
"self_mute": self_mute,
"self_deaf": self_deaf,
},
}
_log.debug("Updating our voice state to %s.", payload)
await self.send_as_json(payload)
async def close(self, code=4000):
if self._keep_alive:
self._keep_alive.stop()
self._keep_alive = None
self._close_code = code
await self.socket.close(code=code)
class DiscordVoiceWebSocket:
"""Implements the websocket protocol for handling voice connections.
Attributes
-----------
IDENTIFY
Send only. Starts a new voice session.
SELECT_PROTOCOL
Send only. Tells discord what encryption mode and how to connect for voice.
READY
Receive only. Tells the websocket that the initial connection has completed.
HEARTBEAT
Send only. Keeps your websocket connection alive.
SESSION_DESCRIPTION
Receive only. Gives you the secret key required for voice.
SPEAKING
Send only. Notifies the client if you are currently speaking.
HEARTBEAT_ACK
Receive only. Tells you your heartbeat has been acknowledged.
RESUME
        Send only. Resumes an existing voice session.
HELLO
Receive only. Tells you that your websocket connection was acknowledged.
RESUMED
        Receive only. Tells you that your RESUME request has succeeded.
CLIENT_CONNECT
Indicates a user has connected to voice.
CLIENT_DISCONNECT
Receive only. Indicates a user has disconnected from voice.
"""
IDENTIFY = 0
SELECT_PROTOCOL = 1
READY = 2
HEARTBEAT = 3
SESSION_DESCRIPTION = 4
SPEAKING = 5
HEARTBEAT_ACK = 6
RESUME = 7
HELLO = 8
RESUMED = 9
CLIENT_CONNECT = 12
CLIENT_DISCONNECT = 13
def __init__(self, socket, loop, *, hook=None):
self.ws = socket
self.loop = loop
self._keep_alive = None
self._close_code = None
self.secret_key = None
if hook:
self._hook = hook
async def _hook(self, *args):
pass
async def send_as_json(self, data):
_log.debug("Sending voice websocket frame: %s.", data)
await self.ws.send_str(utils._to_json(data))
send_heartbeat = send_as_json
async def resume(self):
state = self._connection
payload = {
"op": self.RESUME,
"d": {
"token": state.token,
"server_id": str(state.server_id),
"session_id": state.session_id,
},
}
await self.send_as_json(payload)
async def identify(self):
state = self._connection
payload = {
"op": self.IDENTIFY,
"d": {
"server_id": str(state.server_id),
"user_id": str(state.user.id),
"session_id": state.session_id,
"token": state.token,
},
}
await self.send_as_json(payload)
@classmethod
async def from_client(cls, client, *, resume=False, hook=None):
"""Creates a voice websocket for the :class:`VoiceClient`."""
gateway = "wss://" + client.endpoint + "/?v=4"
http = client._state.http
socket = await http.ws_connect(gateway, compress=15)
ws = cls(socket, loop=client.loop, hook=hook)
ws.gateway = gateway
ws._connection = client
ws._max_heartbeat_timeout = 60.0
ws.thread_id = threading.get_ident()
if resume:
await ws.resume()
else:
await ws.identify()
return ws
async def select_protocol(self, ip, port, mode):
payload = {
"op": self.SELECT_PROTOCOL,
"d": {
"protocol": "udp",
"data": {"address": ip, "port": port, "mode": mode},
},
}
await self.send_as_json(payload)
async def client_connect(self):
payload = {
"op": self.CLIENT_CONNECT,
"d": {"audio_ssrc": self._connection.ssrc},
}
await self.send_as_json(payload)
async def speak(self, state=SpeakingState.voice):
payload = {"op": self.SPEAKING, "d": {"speaking": int(state), "delay": 0}}
await self.send_as_json(payload)
async def received_message(self, msg):
_log.debug("Voice websocket frame received: %s", msg)
op = msg["op"]
data = msg.get("d")
if op == self.READY:
await self.initial_connection(data)
elif op == self.HEARTBEAT_ACK:
self._keep_alive.ack()
elif op == self.RESUMED:
_log.info("Voice RESUME succeeded.")
elif op == self.SESSION_DESCRIPTION:
self._connection.mode = data["mode"]
await self.load_secret_key(data)
elif op == self.HELLO:
interval = data["heartbeat_interval"] / 1000.0
self._keep_alive = VoiceKeepAliveHandler(
ws=self, interval=min(interval, 5.0)
)
self._keep_alive.start()
await self._hook(self, msg)
async def initial_connection(self, data):
state = self._connection
state.ssrc = data["ssrc"]
state.voice_port = data["port"]
state.endpoint_ip = data["ip"]
packet = bytearray(70)
struct.pack_into(">H", packet, 0, 1) # 1 = Send
struct.pack_into(">H", packet, 2, 70) # 70 = Length
struct.pack_into(">I", packet, 4, state.ssrc)
state.socket.sendto(packet, (state.endpoint_ip, state.voice_port))
recv = await self.loop.sock_recv(state.socket, 70)
_log.debug("received packet in initial_connection: %s", recv)
# the ip is ascii starting at the 4th byte and ending at the first null
ip_start = 4
ip_end = recv.index(0, ip_start)
state.ip = recv[ip_start:ip_end].decode("ascii")
state.port = struct.unpack_from(">H", recv, len(recv) - 2)[0]
_log.debug("detected ip: %s port: %s", state.ip, state.port)
# there *should* always be at least one supported mode (xsalsa20_poly1305)
modes = [
mode for mode in data["modes"] if mode in self._connection.supported_modes
]
_log.debug("received supported encryption modes: %s", ", ".join(modes))
mode = modes[0]
await self.select_protocol(state.ip, state.port, mode)
_log.info("selected the voice protocol for use (%s)", mode)
@property
def latency(self):
""":class:`float`: Latency between a HEARTBEAT and its HEARTBEAT_ACK in seconds."""
heartbeat = self._keep_alive
return float("inf") if heartbeat is None else heartbeat.latency
@property
def average_latency(self):
""":class:`list`: Average of last 20 HEARTBEAT latencies."""
heartbeat = self._keep_alive
if heartbeat is None or not heartbeat.recent_ack_latencies:
return float("inf")
return sum(heartbeat.recent_ack_latencies) / len(heartbeat.recent_ack_latencies)
async def load_secret_key(self, data):
_log.info("received secret key for voice connection")
self.secret_key = self._connection.secret_key = data.get("secret_key")
await self.speak()
await self.speak(False)
async def poll_event(self):
# This exception is handled up the chain
msg = await asyncio.wait_for(self.ws.receive(), timeout=30.0)
if msg.type is aiohttp.WSMsgType.TEXT:
await self.received_message(utils._from_json(msg.data))
elif msg.type is aiohttp.WSMsgType.ERROR:
_log.debug("Received %s", msg)
raise ConnectionClosed(self.ws, shard_id=None) from msg.data
elif msg.type in (
aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.CLOSE,
aiohttp.WSMsgType.CLOSING,
):
_log.debug("Received %s", msg)
raise ConnectionClosed(self.ws, shard_id=None, code=self._close_code)
async def close(self, code=1000):
if self._keep_alive is not None:
self._keep_alive.stop()
self._close_code = code
await self.ws.close(code=code)
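# Illustrative sketch (not part of the original module): one way a caller can
# use DiscordWebSocket.wait_for() to wait for a specific DISPATCH event. The
# payload fields ("id", "name") are Discord's GUILD_CREATE fields; the ws
# argument is assumed to be an already-connected DiscordWebSocket.
async def _wait_for_guild_create_sketch(ws, guild_id):
    future = ws.wait_for(
        "GUILD_CREATE",
        predicate=lambda d: int(d["id"]) == guild_id,
        result=lambda d: d["name"],
    )
    return await future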
|
py | 1a387c629462fe5739d003b96a94bef6839f533f | __all__ = ["__version__", "__version_info__"]
__version__ = "0.7.4"
__version_info__ = tuple(int(x) for x in __version__.split("."))
|
py | 1a387c6fb8f4edcdbbac2f01dfefaac6c1865842 | import subprocess
import multiprocessing
import logging
import os.path
import pygments.util
import pygments.lexers
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("classify_linguist")
from .common import *
def classify_pygments(path):
with open(path, "rb") as f:
data = f.read()
try:
return pygments.lexers.guess_lexer_for_filename(path, data).name
except pygments.util.ClassNotFound:
return None
def main():
meta = Meta()
dataset = Dataset()
files = list(dataset.data["files"].keys())
with multiprocessing.Pool() as p:
languages = p.map(classify_pygments, [os.path.join("data", f) for f in files])
for f, l in zip(files, languages):
l1 = l
if l1:
norm_lang = meta.to_normalized_language(dataset="pygments", lang=l1)
dataset.data["files"][f]["annotations"]["pygments-filename"] = norm_lang
else:
if "pygments-filename" in dataset.data["files"][f]["annotations"]:
del dataset.data["files"][f]["annotations"]["pygments-filename"]
dataset.save()
if __name__ == "__main__":
multiprocessing.log_to_stderr(logging.DEBUG)
main()
|
py | 1a387e3813c188016966558005ccd7e7a03975c8 | import configparser
import functools
import arrow
cf = configparser.ConfigParser()
configFile = "config/config.ini"
# configFile = "F:\\code_space\\eniac\\factor_server_docker\\ENIAC\\config\\config.ini"
cf.read(configFile)
"配置来源的属性"
#es
dbHost = cf.get("service_es", "db_host")
dbPort = cf.getint("service_es", "db_port")
dbUser = cf.get("service_es", "db_user")
dbPass = cf.get("service_es", "db_pass")
#log_conf
log_conf = cf.get("file", "log_conf")
# kafka
kafkaHost1 = cf.get("service_kafka", "db_host_1")
kafkaHost2 = cf.get("service_kafka", "db_host_2")
kafkaHost3 = cf.get("service_kafka", "db_host_3")
kafkaPort = cf.get("service_kafka", "db_port")
kafkaTopic = cf.get("service_kafka", "db_topic")
kafkaList = [f'{kafkaHost1}:{kafkaPort}',
f'{kafkaHost2}:{kafkaPort}',
f'{kafkaHost3}:{kafkaPort}']
# import logging
# import logging.config
#
# logging.config.fileConfig(log_conf)
# logger = logging.getLogger("neo")
#
# import warnings
# warnings.filterwarnings("ignore")  # suppress warning output
|
py | 1a387e46fe3ef4e98ac795c148ab159eb65c055c | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
from contextlib import contextmanager
@contextmanager
def db_helper():
"""
Database helper function using context lib. Creates a cursor from a
database connection object, yields that cursor to the other functions to
perform queries and then cleans up making the commits and closures.
"""
db = connect()
c = db.cursor()
try:
yield c
except:
raise
else:
db.commit()
finally:
c.close()
db.close()
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
try:
return psycopg2.connect("dbname='tournament'")
except:
print ("Connection failed")
def deleteMatches():
"""Removes all the match records from the database."""
with db_helper() as c:
c.execute("TRUNCATE matches")
def deletePlayers():
"""Removes all the player records from the database."""
with db_helper() as c:
c.execute("TRUNCATE players CASCADE")
def countPlayers():
"""Returns the number of players currently registered."""
with db_helper() as c:
c.execute("SELECT count (*) FROM players")
return c.fetchone()[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
with db_helper() as c:
query1 = "INSERT INTO players (player_name) VALUES (%s);"
data = (name,)
c.execute(query1, data)
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list will be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
with db_helper() as c:
c.execute(
"""SELECT players.id, players.player_name, count(matches.winner)as wins,
(SELECT count(*) FROM matches
WHERE matches.loser = players.id
OR matches.winner = players.id) as matches
FROM players LEFT JOIN matches
ON players.id = matches.winner
GROUP BY players.id
ORDER BY wins DESC
""")
rows = c.fetchall()
player_standings = []
for row in rows:
player_standings.append(row)
return player_standings
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
with db_helper() as c:
query = "INSERT INTO matches (winner, loser) VALUES (%s, %s);"
        c.execute(query, (winner, loser))
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
players_id_name = []
players = playerStandings()
for row in players:
player_id_name = (row[0], row[1])
players_id_name.append(player_id_name)
pairings = []
index = 0
while index < len(players_id_name):
pairings.append(players_id_name[index] + players_id_name[index+1])
index += 2
return pairings
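# Illustrative usage sketch (not part of the original module): assumes the
# 'tournament' database and its players/matches tables already exist.
if __name__ == '__main__':
    deleteMatches()
    deletePlayers()
    for name in ("Ada", "Bea", "Cam", "Dee"):
        registerPlayer(name)
    # Pair off the first round and arbitrarily report the first player of each
    # pair as the winner.
    for id1, _name1, id2, _name2 in swissPairings():
        reportMatch(id1, id2)
    print(playerStandings())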
|
py | 1a387e48b96fe667d6fea6290d220abf6284ab46 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018, 2020, 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Reana-Server Ping-functionality Flask-Blueprint."""
from flask import Blueprint, jsonify
blueprint = Blueprint("ping", __name__)
@blueprint.route("/ping", methods=["GET"])
def ping(): # noqa
r"""Endpoint to ping the server. Responds with a pong.
---
get:
summary: Ping the server (healthcheck)
operationId: ping
description: >-
Ping the server.
produces:
- application/json
responses:
200:
description: >-
Ping succeeded. Service is running and accessible.
schema:
type: object
properties:
message:
type: string
status:
type: string
examples:
application/json:
message: OK
status: 200
"""
return jsonify(message="OK", status="200"), 200
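# Illustrative sketch (not part of the original module): mounts the blueprint
# on a bare Flask app and exercises /ping through the test client.
def _ping_sketch():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(blueprint)
    with app.test_client() as client:
        # Expected payload: {"message": "OK", "status": "200"}
        return client.get("/ping").get_json()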
|
py | 1a387eb256175e08023f32dfe6c753fb641fc1e2 | import csv
from datetime import datetime
from StringIO import StringIO
from collections import namedtuple
## Helper constants
DATE_FMT = "%Y-%m-%d %H:%M:%S" # 2013-09-16 12:23:33
## Named Tuple
Customer = namedtuple('Customer', ('id', 'name', 'email', 'gender', 'registered', 'city', 'state', 'zip'))
def parse(row):
"""
Parses a row and returns a named tuple.
"""
row[0] = int(row[0]) # Parse ID to an integer
row[4] = datetime.strptime(row[4], DATE_FMT)
return Customer(*row)
def split(line):
"""
Operator function for splitting a line on a delimiter.
"""
reader = csv.reader(StringIO(line))
return reader.next()
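# Illustrative sketch (not part of the original module): splits and parses one
# raw CSV line into a Customer named tuple. The sample record is made up.
def _parse_sketch():
    line = "1,Jane Doe,jane@example.com,F,2013-09-16 12:23:33,Springfield,IL,62704"
    return parse(split(line))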
|
py | 1a387f07bfd03a9543d8398e123abf0639ad249b | import torch
import torch.nn as nn
from .base import Attack, LabelMixin
from .utils import batch_multiply
from .utils import clamp
from .utils import normalize_by_pnorm  # needed by FGMAttack.perturb below
class FGSMAttack(Attack, LabelMixin):
"""
One step fast gradient sign method (Goodfellow et al, 2014).
Arguments:
predict (nn.Module): forward pass function.
loss_fn (nn.Module): loss function.
eps (float): attack step size.
clip_min (float): mininum value per input dimension.
clip_max (float): maximum value per input dimension.
targeted (bool): indicate if this is a targeted attack.
"""
def __init__(self, predict, loss_fn=None, eps=0.3, clip_min=0., clip_max=1., targeted=False):
super(FGSMAttack, self).__init__(predict, loss_fn, clip_min, clip_max)
self.eps = eps
self.targeted = targeted
if self.loss_fn is None:
self.loss_fn = nn.CrossEntropyLoss(reduction="sum")
def perturb(self, x, y=None):
"""
Given examples (x, y), returns their adversarial counterparts with an attack length of eps.
Arguments:
x (torch.Tensor): input tensor.
y (torch.Tensor): label tensor.
- if None and self.targeted=False, compute y as predicted labels.
- if self.targeted=True, then y must be the targeted labels.
Returns:
torch.Tensor containing perturbed inputs.
torch.Tensor containing the perturbation.
"""
x, y = self._verify_and_process_inputs(x, y)
xadv = x.requires_grad_()
outputs = self.predict(xadv)
loss = self.loss_fn(outputs, y)
if self.targeted:
loss = -loss
loss.backward()
grad_sign = xadv.grad.detach().sign()
xadv = xadv + batch_multiply(self.eps, grad_sign)
xadv = clamp(xadv, self.clip_min, self.clip_max)
radv = xadv - x
return xadv.detach(), radv.detach()
LinfFastGradientAttack = FGSMAttack
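# Illustrative sketch (not part of the original module): drives the Linf attack
# on a toy linear classifier with random data; the model and batch below are
# placeholders, not part of this package.
def _fgsm_usage_sketch():
    torch.manual_seed(0)
    toy_model = nn.Linear(10, 3)              # stand-in for a real classifier
    x = torch.rand(4, 10)                     # inputs assumed to lie in [0, 1]
    y = torch.randint(0, 3, (4,))
    attack = FGSMAttack(toy_model, eps=0.1)
    x_adv, delta = attack.perturb(x, y)
    assert delta.abs().max() <= 0.1 + 1e-6    # Linf perturbation stays within eps
    return x_adv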
class FGMAttack(Attack, LabelMixin):
"""
One step fast gradient method. Perturbs the input with gradient (not gradient sign) of the loss wrt the input.
Arguments:
predict (nn.Module): forward pass function.
loss_fn (nn.Module): loss function.
eps (float): attack step size.
clip_min (float): mininum value per input dimension.
clip_max (float): maximum value per input dimension.
targeted (bool): indicate if this is a targeted attack.
"""
def __init__(self, predict, loss_fn=None, eps=0.3, clip_min=0., clip_max=1., targeted=False):
super(FGMAttack, self).__init__(
predict, loss_fn, clip_min, clip_max)
self.eps = eps
self.targeted = targeted
if self.loss_fn is None:
self.loss_fn = nn.CrossEntropyLoss(reduction="sum")
def perturb(self, x, y=None):
"""
Given examples (x, y), returns their adversarial counterparts with an attack length of eps.
Arguments:
x (torch.Tensor): input tensor.
y (torch.Tensor): label tensor.
- if None and self.targeted=False, compute y as predicted labels.
- if self.targeted=True, then y must be the targeted labels.
Returns:
torch.Tensor containing perturbed inputs.
torch.Tensor containing the perturbation.
"""
x, y = self._verify_and_process_inputs(x, y)
xadv = x.requires_grad_()
outputs = self.predict(xadv)
loss = self.loss_fn(outputs, y)
if self.targeted:
loss = -loss
loss.backward()
grad = normalize_by_pnorm(xadv.grad)
xadv = xadv + batch_multiply(self.eps, grad)
xadv = clamp(xadv, self.clip_min, self.clip_max)
radv = xadv - x
return xadv.detach(), radv.detach()
L2FastGradientAttack = FGMAttack
|
py | 1a387f0a7ab6513e1c619e55b24ae2df58847192 | # coding=utf-8
from __future__ import absolute_import, print_function
import posixpath
from urllib import urlencode
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import parse_qsl, urlsplit, urlunsplit
__author__ = 'Tyler Butler <[email protected]>'
try:
# noinspection PyUnresolvedReferences
from propane.flask.urls import *
except ImportError:
pass
def remove_query_parameters(url, params=None, case_sensitive=False):
def is_in(to_check, iterable, cs):
if cs:
return to_check in iterable
else:
return to_check.upper().lower() in iterable
pieces = list(urlsplit(url))
if params is None:
pieces[3] = ''
else:
if not case_sensitive:
params[:] = [p.upper().lower() for p in params]
query = parse_qsl(pieces[3])
query[:] = [(param, value) for param, value in query if not is_in(param, params, case_sensitive)]
pieces[3] = urlencode(query, doseq=True)
return urlunsplit(pieces)
def urljoin(url1, *url2):
# This method is necessary because sometimes urlparse.urljoin simply doesn't work correctly
# when joining URL fragments.
return posixpath.join(url1, *url2)
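# Illustrative sketch (not part of the original module): strips tracking
# parameters from a URL while leaving the rest of the query intact. The sample
# URL is made up.
def _remove_query_parameters_sketch():
    url = 'https://example.com/page?id=7&utm_source=mail&UTM_MEDIUM=email'
    # Matching is case-insensitive by default, so both utm_* parameters go away.
    return remove_query_parameters(url, params=['utm_source', 'utm_medium'])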
|
py | 1a38822ea5eb5e2d9d639c46bd8ab015c586ef1f | """factories for creating mincVolumes"""
from .volumes import mincException, mincVolume, getDtype, transform_xyz_coordinates_using_xfm, transform_multiple_xyz_coordinates_using_xfm
def volumeFromFile(filename, dtype="double", readonly=True, labels=False):
"""creates a new mincVolume from existing file."""
v = mincVolume(filename=filename, dtype=dtype, readonly=readonly, labels=labels)
v.openFile()
return(v)
def volumeFromInstance(volInstance, outputFilename, dtype="double", data=False,
dims=None, volumeType=None, path=False, labels=False):
"""creates new mincVolume from another mincVolume"""
v = mincVolume(filename=outputFilename, dtype=dtype, readonly=False, labels=labels)
v.copyDimensions(volInstance, dims)
v.copyDtype(volInstance)
v.createVolumeHandle(volumeType or volInstance.volumeType)
v.copyHistory(volInstance)
if data:
if not volInstance.dataLoaded:
volInstance.loadData()
v.createVolumeImage()
v.data = volInstance.data.copy()
if path:
v.copyAttributes(volInstance, path)
return(v)
def volumeLikeFile(likeFilename, outputFilename, dtype="double", volumeType=None,
labels=False, data=False):
"""creates a new mincVolume with dimension info taken from an existing file"""
lf = volumeFromFile(filename=likeFilename, dtype=dtype, labels=labels)
v = volumeFromInstance(volInstance=lf, outputFilename=outputFilename,
dtype=dtype, volumeType=volumeType,
labels=labels, data=data)
lf.closeVolume()
return(v)
def volumeFromDescription(outputFilename, dimnames, sizes, starts, steps, volumeType="ushort",
dtype="double", labels=False,
x_dir_cosines=(1.0,0.0,0.0),
y_dir_cosines=(0.0,1.0,0.0),
z_dir_cosines=(0.0,0.0,1.0)):
"""creates a new mincVolume given starts, steps, sizes, and dimension names"""
v = mincVolume(filename=outputFilename, dtype=dtype, readonly=False, labels=labels)
v.createNewDimensions(dimnames, sizes, starts, steps,
x_dir_cosines, y_dir_cosines, z_dir_cosines)
v.createVolumeHandle(volumeType)
v.createVolumeImage()
return(v)
def volumeFromData(outputFilename, data, dimnames=("xspace", "yspace", "zspace"),
starts=(0,0,0), steps=(1,1,1),
volumeType="ushort", dtype=None, labels=False,
x_dir_cosines=(1.0,0.0,0.0),
y_dir_cosines=(0.0,1.0,0.0),
z_dir_cosines=(0.0,0.0,1.0)):
"""creates a mincVolume from a given array"""
# deal with the dtype. If the dtype was not set, use the dtype of the
# data block. If that is not possible, default to double.
    if dtype is None:
if getDtype(data):
dtype = getDtype(data)
else:
dtype = "double"
v = volumeFromDescription(outputFilename=outputFilename, sizes=data.shape,
dimnames=dimnames, starts=starts, steps=steps,
volumeType=volumeType, dtype=dtype, labels=labels,
x_dir_cosines=x_dir_cosines,
y_dir_cosines=y_dir_cosines,
z_dir_cosines=z_dir_cosines)
v.data = data
return v
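# Illustrative sketch (not part of the original module): writes a small random
# volume via volumeFromData. The output path is a placeholder, and the
# writeFile()/closeVolume() calls are assumed from the mincVolume API
# (closeVolume() is already used by volumeLikeFile above).
def _volume_from_data_sketch():
    import numpy as np
    data = np.random.rand(10, 10, 10)
    v = volumeFromData("/tmp/example.mnc", data,
                       dimnames=("xspace", "yspace", "zspace"),
                       starts=(0, 0, 0), steps=(1, 1, 1),
                       volumeType="ushort")
    v.writeFile()
    v.closeVolume()
    return v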
|
py | 1a388349e2ad96e522ae213945fe13a8ab6a584e | from flask import Flask,send_from_directory
app = Flask(__name__)
app.secret_key = 'test'
@app.route('/')
def home():
return "Proxy is up!"
@app.route('/proxy/', methods=['GET'])
def result():
return send_from_directory('','proxy.json')
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0',port=5000,use_reloader=True)
|
py | 1a3883b6c386c2922484ebcd3e8755ea62c057be | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Gimel Studio'
copyright = '2019-2021, Noah Rahm and contributors'
author = 'Noah Rahm and contributors'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['build', 'README.md']
# The suffix of source filenames.
source_suffix = '.rst'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Logo
html_logo = "./logo.png"
# Setup
def setup(app):
app.add_css_file('css/custom.css')
|
py | 1a3884b96b8e8ef839c5969c8b569254a4b8b546 | """
Apicurio Registry API [v2]
Apicurio Registry is a datastore for standard event schemas and API designs. Apicurio Registry enables developers to manage and share the structure of their data using a REST interface. For example, client applications can dynamically push or pull the latest updates to or from the registry without needing to redeploy. Apicurio Registry also enables developers to create rules that govern how registry content can evolve over time. For example, this includes rules for content validation and version compatibility. The Apicurio Registry REST API enables client applications to manage the artifacts in the registry. This API provides create, read, update, and delete operations for schema and API artifacts, rules, versions, and metadata. The supported artifact types include: - Apache Avro schema - AsyncAPI specification - Google protocol buffers - GraphQL schema - JSON Schema - Kafka Connect schema - OpenAPI specification - Web Services Description Language - XML Schema Definition **Important**: The Apicurio Registry REST API is available from `https://MY-REGISTRY-URL/apis/registry/v2` by default. Therefore you must prefix all API operation paths with `../apis/registry/v2` in this case. For example: `../apis/registry/v2/ids/globalIds/{globalId}`. # noqa: E501
The version of the OpenAPI document: 2.2.4-SNAPSHOT
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import json
import atexit
import mimetypes
from multiprocessing.pool import ThreadPool
import io
import os
import re
import typing
from urllib.parse import quote
from urllib3.fields import RequestField
from apicurioregistryclient import rest
from apicurioregistryclient.configuration import Configuration
from apicurioregistryclient.exceptions import ApiTypeError, ApiValueError, ApiException
from apicurioregistryclient.model_utils import (
ModelNormal,
ModelSimple,
ModelComposed,
check_allowed_values,
check_validations,
date,
datetime,
deserialize_file,
file_type,
model_to_dict,
none_type,
validate_and_convert_types
)
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.0/python'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None,
_content_type: typing.Optional[str] = None
):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
if header_params['Content-Type'].startswith("multipart"):
post_params = self.parameters_to_multipart(post_params,
(dict) )
# body
if body:
body = self.sanitize_for_serialization(body)
# auth setting
self.update_params_for_auth(header_params, query_params,
auth_settings, resource_path, method, body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8')
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
return (return_data)
return return_data
# deserialize response data
if response_type:
if response_type != (file_type,):
encoding = "utf-8"
content_type = response_data.getheader('content-type')
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
if match:
encoding = match.group(1)
response_data.data = response_data.data.decode(encoding)
return_data = self.deserialize(
response_data,
response_type,
_check_type
)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def parameters_to_multipart(self, params, collection_types):
"""Get parameters as list of tuples, formatting as json if value is collection_types
:param params: Parameters as list of two-tuples
:param dict collection_types: Parameter collection types
:return: Parameters as list of tuple or urllib3.fields.RequestField
"""
new_params = []
if collection_types is None:
collection_types = (dict)
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if isinstance(v, collection_types): # v is instance of collection_type, formatting as application/json
v = json.dumps(v, ensure_ascii=False).encode("utf-8")
field = RequestField(k, v)
field.make_multipart(content_type="application/json; charset=utf-8")
new_params.append(field)
else:
new_params.append((k, v))
return new_params
@classmethod
def sanitize_for_serialization(cls, obj):
"""Prepares data for transmission before it is sent with the rest client
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
If obj is io.IOBase, return the bytes
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if isinstance(obj, (ModelNormal, ModelComposed)):
return {
key: cls.sanitize_for_serialization(val) for key, val in model_to_dict(obj, serialize=True).items()
}
elif isinstance(obj, io.IOBase):
return cls.get_file_data_and_close_file(obj)
elif isinstance(obj, (str, int, float, none_type, bool)):
return obj
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, ModelSimple):
return cls.sanitize_for_serialization(obj.value)
elif isinstance(obj, (list, tuple)):
return [cls.sanitize_for_serialization(item) for item in obj]
if isinstance(obj, dict):
return {key: cls.sanitize_for_serialization(val) for key, val in obj.items()}
raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__))
def deserialize(self, response, response_type, _check_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param _check_type: boolean, whether to check the types of the data
received from the server
:type _check_type: bool
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == (file_type,):
content_disposition = response.getheader("Content-Disposition")
return deserialize_file(response.data, self.configuration,
content_disposition=content_disposition)
# fetch data from response object
try:
received_data = json.loads(response.data)
except ValueError:
received_data = response.data
# store our data under the key of 'received_data' so users have some
# context if they are deserializing a string and the data type is wrong
deserialized_data = validate_and_convert_types(
received_data,
response_type,
['received_data'],
True,
_check_type,
configuration=self.configuration
)
return deserialized_data
def call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
async_req: typing.Optional[bool] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None
):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param files: key -> field name, value -> a list of open file
objects for `multipart/form-data`.
:type files: dict
:param async_req bool: execute request asynchronously
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:type collection_formats: dict, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _check_type: boolean describing if the data back from the server
should have its type checked.
:type _check_type: bool, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_check_type)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _check_type))
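# Illustrative sketch (endpoint path, model class, and parameters are
# hypothetical; the return shape follows the docstring above):
#   thread = api_client.call_api('/pets/{petId}', 'GET',
#                                path_params={'petId': 1},
#                                response_type=(Pet,),
#                                auth_settings=['api_key'],
#                                async_req=True)
#   result = thread.get()   # async_req=True returns the pool's async result
#   result = api_client.call_api('/pets/{petId}', 'GET',
#                                path_params={'petId': 1},
#                                response_type=(Pet,),
#                                auth_settings=['api_key'])
#   # async_req falsy: the call blocks and returns the response directly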
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
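# Illustrative sketch (URL is hypothetical): the method string selects the
# matching RESTClient verb, so
#   api_client.request('GET', 'https://api.example.com/v2/pets',
#                      query_params=[('limit', 10)])
# dispatches to self.rest_client.GET, and an unsupported verb raises ApiValueError.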
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
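# Illustrative sketch: collection_formats controls how list values are encoded:
#   api_client.parameters_to_tuples({'status': ['new', 'open']}, {'status': 'csv'})
#   # -> [('status', 'new,open')]
#   api_client.parameters_to_tuples({'status': ['new', 'open']}, {'status': 'multi'})
#   # -> [('status', 'new'), ('status', 'open')]
#   api_client.parameters_to_tuples({'status': ['new', 'open']}, {'status': 'ssv'})
#   # -> [('status', 'new open')]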
@staticmethod
def get_file_data_and_close_file(file_instance: io.IOBase) -> bytes:
file_data = file_instance.read()
file_instance.close()
return file_data
def files_parameters(self, files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None):
"""Builds form parameters.
:param files: None or a dict with key=param_name and
value is a list of open file objects
:return: List of tuples of form parameters with file data
"""
if files is None:
return []
params = []
for param_name, file_instances in files.items():
if file_instances is None:
# if the file field is nullable, skip None values
continue
for file_instance in file_instances:
if file_instance is None:
# if the file field is nullable, skip None values
continue
if file_instance.closed is True:
raise ApiValueError(
"Cannot read a closed file. The passed in file_type "
"for %s must be open." % param_name
)
filename = os.path.basename(file_instance.name)
filedata = self.get_file_data_and_close_file(file_instance)
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append((param_name, (filename, filedata, mimetype)))
return params
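# Illustrative sketch (file name is hypothetical): each open file is read,
# closed, and turned into a (field_name, (filename, bytes, mimetype)) tuple:
#   with open('photo.png', 'rb') as f:
#       api_client.files_parameters({'file': [f]})
#   # -> [('file', ('photo.png', <file bytes>, 'image/png'))]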
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
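# Illustrative sketch:
#   api_client.select_header_accept(['application/xml', 'application/json'])
#   # -> 'application/json'
#   api_client.select_header_accept(['application/xml', 'text/plain'])
#   # -> 'application/xml, text/plain'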
def select_header_content_type(self, content_types, method=None, body=None):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:param method: http method (e.g. POST, PATCH).
:param body: http body to send.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if (method == 'PATCH' and
'application/json-patch+json' in content_types and
isinstance(body, list)):
return 'application/json-patch+json'
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
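# Illustrative sketch:
#   api_client.select_header_content_type([])
#   # -> 'application/json'
#   api_client.select_header_content_type(['application/xml', 'application/json'])
#   # -> 'application/json'
#   api_client.select_header_content_type(
#       ['application/json-patch+json', 'application/json'],
#       method='PATCH', body=[{'op': 'replace', 'path': '/name', 'value': 'x'}])
#   # -> 'application/json-patch+json'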
def update_params_for_auth(self, headers, queries, auth_settings,
resource_path, method, body):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param resource_path: A string representation of the HTTP request resource path.
:param method: A string representation of the HTTP request method.
:param body: A object representing the body of the HTTP request.
The object type is the return value of _encoder.default().
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
if auth_setting['type'] != 'http-signature':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
queries.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query`, `header` or `cookie`'
)
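# Illustrative sketch (auth setting values are hypothetical): if
# configuration.auth_settings() returns
#   {'api_key': {'in': 'header', 'key': 'X-API-KEY',
#                'value': 'secret', 'type': 'apiKey'}}
# then update_params_for_auth(headers, queries, ['api_key'], ...) adds
#   headers['X-API-KEY'] = 'secret'
# while an 'in' value of 'query' would append ('X-API-KEY', 'secret') to queries.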
class Endpoint(object):
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map (dict): see below key value pairs
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
'collection_format_map' (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient): api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only',
'_check_input_type',
'_check_return_type',
'_content_type',
'_spec_property_naming'
])
self.params_map['nullable'].extend(['_request_timeout'])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
extra_types = {
'async_req': (bool,),
'_host_index': (none_type, int),
'_preload_content': (bool,),
'_request_timeout': (none_type, float, (float,), [float], int, (int,), [int]),
'_return_http_data_only': (bool,),
'_check_input_type': (bool,),
'_check_return_type': (bool,),
'_spec_property_naming': (bool,),
'_content_type': (none_type, str)
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
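# Illustrative shape of the constructor arguments for a hypothetical
# `get_pet` operation (values are examples only, not generated output):
#   settings = {'response_type': (Pet,), 'auth': ['api_key'],
#               'endpoint_path': '/pets/{petId}', 'operation_id': 'get_pet',
#               'http_method': 'GET', 'servers': None}
#   params_map = {'all': ['pet_id'], 'required': ['pet_id'],
#                 'nullable': [], 'enum': [], 'validation': []}
#   root_map = {'validations': {}, 'allowed_values': {},
#               'openapi_types': {'pet_id': (int,)},
#               'attribute_map': {'pet_id': 'petId'},
#               'location_map': {'pet_id': 'path'},
#               'collection_format_map': {}}
#   headers_map = {'accept': ['application/json'], 'content_type': []}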
def __validate_inputs(self, kwargs):
for param in self.params_map['enum']:
if param in kwargs:
check_allowed_values(
self.allowed_values,
(param,),
kwargs[param]
)
for param in self.params_map['validation']:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param],
configuration=self.api_client.configuration
)
if kwargs['_check_input_type'] is False:
return
for key, value in kwargs.items():
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
kwargs['_spec_property_naming'],
kwargs['_check_input_type'],
configuration=self.api_client.configuration
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
'body': None,
'collection_format': {},
'file': {},
'form': [],
'header': {},
'path': {},
'query': []
}
for param_name, param_value in kwargs.items():
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == 'body':
params['body'] = param_value
continue
base_name = self.attribute_map[param_name]
if (param_location == 'form' and
self.openapi_types[param_name] == (file_type,)):
params['file'][base_name] = [param_value]
elif (param_location == 'form' and
self.openapi_types[param_name] == ([file_type],)):
# param_value is already a list
params['file'][base_name] = param_value
elif param_location in {'form', 'query'}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {'form', 'query'}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params['collection_format'][base_name] = collection_format
return params
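# Illustrative sketch (names follow the hypothetical maps above): with
# location_map {'pet_id': 'path', 'status': 'query'} and attribute_map
# {'pet_id': 'petId', 'status': 'status'}, kwargs {'pet_id': 1, 'status': 'sold'}
# gather into
#   {'path': {'petId': 1}, 'query': [('status', 'sold')], 'body': None,
#    'header': {}, 'form': [], 'file': {}, 'collection_format': {}}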
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
api_instance = AdminApi()
api_instance.create_global_rule # this is an instance of the class Endpoint
api_instance.create_global_rule() # this invokes api_instance.create_global_rule.__call__()
which then invokes the callable functions stored in that endpoint at
api_instance.create_global_rule.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
index = self.api_client.configuration.server_operation_index.get(
self.settings['operation_id'], self.api_client.configuration.server_index
) if kwargs['_host_index'] is None else kwargs['_host_index']
server_variables = self.api_client.configuration.server_operation_variables.get(
self.settings['operation_id'], self.api_client.configuration.server_variables
)
_host = self.api_client.configuration.get_host_from_settings(
index, variables=server_variables, servers=self.settings['servers']
)
except IndexError:
if self.settings['servers']:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s" %
len(self.settings['servers'])
)
_host = None
for key, value in kwargs.items():
if key not in self.params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" %
(key, self.settings['operation_id'])
)
# only raise this nullable ApiValueError when _check_input_type
# is False; when _check_input_type is True this case is caught
# in self.__validate_inputs
if (key not in self.params_map['nullable'] and value is None
and kwargs['_check_input_type'] is False):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" %
(key, self.settings['operation_id'])
)
for key in self.params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings['operation_id'])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map['accept']
if accept_headers_list:
params['header']['Accept'] = self.api_client.select_header_accept(
accept_headers_list)
if kwargs.get('_content_type'):
params['header']['Content-Type'] = kwargs['_content_type']
else:
content_type_headers_list = self.headers_map['content_type']
if content_type_headers_list:
if params['body'] != "":
header_list = self.api_client.select_header_content_type(
content_type_headers_list, self.settings['http_method'],
params['body'])
params['header']['Content-Type'] = header_list
return self.api_client.call_api(
self.settings['endpoint_path'], self.settings['http_method'],
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=self.settings['response_type'],
auth_settings=self.settings['auth'],
async_req=kwargs['async_req'],
_check_type=kwargs['_check_return_type'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
_host=_host,
collection_formats=params['collection_format'])