ext | sha | content
---|---|---|
py | b40678810b5d99a662cccc54475bea12069ffe9d | import click
import logging
import os
import subprocess
import time
from threading import Thread
from ray.autoscaler.tags import TAG_RAY_NODE_STATUS, TAG_RAY_RUNTIME_CONFIG, \
TAG_RAY_FILE_MOUNTS_CONTENTS, \
STATUS_UP_TO_DATE, STATUS_UPDATE_FAILED, STATUS_WAITING_FOR_SSH, \
STATUS_SETTING_UP, STATUS_SYNCING_FILES
from ray.autoscaler.command_runner import NODE_START_WAIT_S, SSHOptions
from ray.autoscaler.log_timer import LogTimer
from ray.autoscaler.cli_logger import cli_logger
import colorful as cf
logger = logging.getLogger(__name__)
READY_CHECK_INTERVAL = 5
class NodeUpdater:
"""A process for syncing files and running init commands on a node."""
def __init__(self,
node_id,
provider_config,
provider,
auth_config,
cluster_name,
file_mounts,
initialization_commands,
setup_commands,
ray_start_commands,
runtime_hash,
file_mounts_contents_hash,
cluster_synced_files=None,
process_runner=subprocess,
use_internal_ip=False,
docker_config=None):
self.log_prefix = "NodeUpdater: {}: ".format(node_id)
use_internal_ip = (use_internal_ip
or provider_config.get("use_internal_ips", False))
self.cmd_runner = provider.get_command_runner(
self.log_prefix, node_id, auth_config, cluster_name,
process_runner, use_internal_ip, docker_config)
self.daemon = True
self.process_runner = process_runner
self.node_id = node_id
self.provider = provider
self.file_mounts = {
remote: os.path.expanduser(local)
for remote, local in file_mounts.items()
}
self.initialization_commands = initialization_commands
self.setup_commands = setup_commands
self.ray_start_commands = ray_start_commands
self.runtime_hash = runtime_hash
self.file_mounts_contents_hash = file_mounts_contents_hash
self.cluster_synced_files = cluster_synced_files
self.auth_config = auth_config
def run(self):
cli_logger.old_info(logger, "{}Updating to {}", self.log_prefix,
self.runtime_hash)
try:
with LogTimer(self.log_prefix +
"Applied config {}".format(self.runtime_hash)):
self.do_update()
except Exception as e:
error_str = str(e)
if hasattr(e, "cmd"):
error_str = "(Exit Status {}) {}".format(
e.returncode, " ".join(e.cmd))
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED})
cli_logger.error("New status: {}", cf.bold(STATUS_UPDATE_FAILED))
cli_logger.old_error(logger, "{}Error executing: {}\n",
self.log_prefix, error_str)
cli_logger.error("!!!")
if hasattr(e, "cmd"):
cli_logger.error(
"Setup command `{}` failed with exit code {}. stderr:",
cf.bold(e.cmd), e.returncode)
else:
cli_logger.verbose_error("{}", str(vars(e)))
# todo: handle this better somehow?
cli_logger.error("{}", str(e))
# todo: print stderr here
cli_logger.error("!!!")
cli_logger.newline()
if isinstance(e, click.ClickException):
# todo: why do we ignore this here
return
raise
tags_to_set = {
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_RUNTIME_CONFIG: self.runtime_hash,
}
if self.file_mounts_contents_hash is not None:
tags_to_set[
TAG_RAY_FILE_MOUNTS_CONTENTS] = self.file_mounts_contents_hash
self.provider.set_node_tags(self.node_id, tags_to_set)
cli_logger.labeled_value("New status", STATUS_UP_TO_DATE)
self.exitcode = 0
def sync_file_mounts(self, sync_cmd):
nolog_paths = []
if cli_logger.verbosity == 0:
nolog_paths = [
"~/ray_bootstrap_key.pem", "~/ray_bootstrap_config.yaml"
]
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
if allow_non_existing_paths and not os.path.exists(local_path):
# Ignore missing source files. In the future we should support
# the --delete-missing-args command to delete files that have
# been removed
return
assert os.path.exists(local_path), local_path
if os.path.isdir(local_path):
if not local_path.endswith("/"):
local_path += "/"
if not remote_path.endswith("/"):
remote_path += "/"
with LogTimer(self.log_prefix +
"Synced {} to {}".format(local_path, remote_path)):
self.cmd_runner.run("mkdir -p {}".format(
os.path.dirname(remote_path)))
sync_cmd(local_path, remote_path)
if remote_path not in nolog_paths:
# todo: timed here?
cli_logger.print("{} from {}", cf.bold(remote_path),
cf.bold(local_path))
# Rsync file mounts
with cli_logger.group(
"Processing file mounts", _numbered=("[]", 2, 6)):
for remote_path, local_path in self.file_mounts.items():
do_sync(remote_path, local_path)
if self.cluster_synced_files:
with cli_logger.group(
"Processing worker file mounts", _numbered=("[]", 3, 6)):
for path in self.cluster_synced_files:
do_sync(path, path, allow_non_existing_paths=True)
else:
cli_logger.print(
"No worker file mounts to sync", _numbered=("[]", 3, 6))
def wait_ready(self, deadline):
with cli_logger.group(
"Waiting for SSH to become available", _numbered=("[]", 1, 6)):
with LogTimer(self.log_prefix + "Got remote shell"):
cli_logger.old_info(logger, "{}Waiting for remote shell...",
self.log_prefix)
cli_logger.print("Running `{}` as a test.", cf.bold("uptime"))
while time.time() < deadline and \
not self.provider.is_terminated(self.node_id):
try:
cli_logger.old_debug(logger,
"{}Waiting for remote shell...",
self.log_prefix)
self.cmd_runner.run("uptime")
cli_logger.old_debug(logger, "Uptime succeeded.")
cli_logger.success("Success.")
return True
except Exception as e:
retry_str = str(e)
if hasattr(e, "cmd"):
retry_str = "(Exit Status {}): {}".format(
e.returncode, " ".join(e.cmd))
cli_logger.print(
"SSH still not available {}, "
"retrying in {} seconds.", cf.gray(retry_str),
cf.bold(str(READY_CHECK_INTERVAL)))
cli_logger.old_debug(logger,
"{}Node not up, retrying: {}",
self.log_prefix, retry_str)
time.sleep(READY_CHECK_INTERVAL)
assert False, "Unable to connect to node"
def do_update(self):
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_WAITING_FOR_SSH})
cli_logger.labeled_value("New status", STATUS_WAITING_FOR_SSH)
deadline = time.time() + NODE_START_WAIT_S
self.wait_ready(deadline)
node_tags = self.provider.node_tags(self.node_id)
logger.debug("Node tags: {}".format(str(node_tags)))
# runtime_hash will only change whenever the user restarts
# or updates their cluster with `get_or_create_head_node`
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) == self.runtime_hash and (
self.file_mounts_contents_hash is None
or node_tags.get(TAG_RAY_FILE_MOUNTS_CONTENTS) ==
self.file_mounts_contents_hash):
# todo: we lie in the confirmation message since
# full setup might be cancelled here
cli_logger.print(
"Configuration already up to date, "
"skipping file mounts, initalization and setup commands.")
cli_logger.old_info(logger,
"{}{} already up-to-date, skip to ray start",
self.log_prefix, self.node_id)
else:
cli_logger.print(
"Updating cluster configuration.",
_tags=dict(hash=self.runtime_hash))
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SYNCING_FILES})
cli_logger.labeled_value("New status", STATUS_SYNCING_FILES)
self.sync_file_mounts(self.rsync_up)
# Only run setup commands if runtime_hash has changed because
# we don't want to run setup_commands every time the head node
# file_mounts folders have changed.
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) != self.runtime_hash:
# Run init commands
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SETTING_UP})
cli_logger.labeled_value("New status", STATUS_SETTING_UP)
if self.initialization_commands:
with cli_logger.group(
"Running initialization commands",
_numbered=("[]", 4,
6)): # todo: fix command numbering
with LogTimer(
self.log_prefix + "Initialization commands",
show_status=True):
for cmd in self.initialization_commands:
self.cmd_runner.run(
cmd,
ssh_options_override=SSHOptions(
self.auth_config.get(
"ssh_private_key")))
else:
cli_logger.print(
"No initialization commands to run.",
_numbered=("[]", 4, 6))
if self.setup_commands:
with cli_logger.group(
"Running setup commands",
_numbered=("[]", 5,
6)): # todo: fix command numbering
with LogTimer(
self.log_prefix + "Setup commands",
show_status=True):
total = len(self.setup_commands)
for i, cmd in enumerate(self.setup_commands):
if cli_logger.verbosity == 0:
cmd_to_print = cf.bold(cmd[:30]) + "..."
else:
cmd_to_print = cf.bold(cmd)
cli_logger.print(
"{}",
cmd_to_print,
_numbered=("()", i, total))
self.cmd_runner.run(cmd)
else:
cli_logger.print(
"No setup commands to run.", _numbered=("[]", 5, 6))
with cli_logger.group(
"Starting the Ray runtime", _numbered=("[]", 6, 6)):
with LogTimer(
self.log_prefix + "Ray start commands", show_status=True):
for cmd in self.ray_start_commands:
self.cmd_runner.run(cmd)
def rsync_up(self, source, target):
cli_logger.old_info(logger, "{}Syncing {} to {}...", self.log_prefix,
source, target)
self.cmd_runner.run_rsync_up(source, target)
cli_logger.verbose("`rsync`ed {} (local) to {} (remote)",
cf.bold(source), cf.bold(target))
def rsync_down(self, source, target):
cli_logger.old_info(logger, "{}Syncing {} from {}...", self.log_prefix,
source, target)
self.cmd_runner.run_rsync_down(source, target)
cli_logger.verbose("`rsync`ed {} (remote) to {} (local)",
cf.bold(source), cf.bold(target))
class NodeUpdaterThread(NodeUpdater, Thread):
def __init__(self, *args, **kwargs):
Thread.__init__(self)
NodeUpdater.__init__(self, *args, **kwargs)
self.exitcode = -1
|
py | b40678815476f2f9d07f7ca5b749dd19048b238f | """Test module for jpl/rules/task/task."""
import pytest
from jpl.rules.task.task import (
TaskHasName,
TaskHasDescription,
TaskHasFunction
)
from tests.test_utils import load_from_json
class MockTask:
def __init__(self):
self.data = load_from_json(
fp="testData/task.json"
)
@property
def passing(self):
return self.data
@property
def name_empty(self):
self.data["name"] = ""
return self.data
@property
def name_missing(self):
self.data.pop("name")
return self.data
@property
def desc_empty(self):
self.data["description"] = ""
return self.data
@property
def desc_missing(self):
self.data.pop("description")
return self.data
@property
def function_missing(self):
self.data.pop("function")
return self.data
@pytest.mark.parametrize(
"task, expected",
[
(MockTask().passing, "PASSED"),
(MockTask().name_empty, "FAILED"),
(MockTask().name_missing, "FAILED")
]
)
def test_task_has_name(task, expected):
rule = TaskHasName()
result = rule.run(
playbook=task
)
assert result == expected
@pytest.mark.parametrize(
"task, expected",
[
(MockTask().passing, "PASSED"),
(MockTask().desc_empty, "WARNING"),
(MockTask().desc_missing, "WARNING")
]
)
def test_task_has_desc(task, expected):
rule = TaskHasDescription()
result = rule.run(
playbook=task
)
assert result == expected
@pytest.mark.parametrize(
"task, expected",
[
(MockTask().passing, "PASSED"),
(MockTask().function_missing, "FAILED")
]
)
def test_task_has_function(task, expected):
rule = TaskHasFunction()
result = rule.run(
playbook=task
)
assert result == expected
|
py | b40678d12c112d28a727f19d6c87ad647099d189 | from typing import List
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
def get_wacc_graphics():
equal_graphic = EquityDebtWACCGraphicForHalfAndHalf(
0.16,
0.08,
0.5,
0.5,
0.35
)
seventy_five_percent_equity_graphic = EquityDebtWACCGraphicForSeventyFivePercentEquity(
0.16,
0.08,
0.75,
0.25,
0.35
)
return [
equal_graphic,
seventy_five_percent_equity_graphic
]
# TODO [#15]: this whole thing is a mess. Tried to make one reusable class for creating this graphic, but was having issues
# TODO [#16]: actually getting it to work. It seems the graphics sizes are not working as expected. Therefore I made a
# TODO [#17]: separate class for each version of the graphic, with some values hard-coded, and these classes are not
# TODO [#18]: reusable at all.
class EquityDebtWACCGraphicForSeventyFivePercentEquity(pl.Template):
def __init__(self, cost_of_equity: float, cost_of_debt: float, weight_of_equity: float, weight_of_debt: float, tax_rate: float):
self.cost_of_equity = cost_of_equity
self.cost_of_debt = cost_of_debt
self.weight_of_equity = weight_of_equity
self.weight_of_debt = weight_of_debt
self.tax_rate = tax_rate
self.contents = self._get_contents()
super().__init__()
@property
def wacc(self):
return self.cost_of_equity * self.weight_of_equity + self.after_tax_cost_of_debt * self.weight_of_debt
@property
def after_tax_cost_of_debt(self):
return self.cost_of_debt * (1 - self.tax_rate)
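# Worked example (added comment) using the inputs passed in get_wacc_graphics above
# (0.16, 0.08, 0.75, 0.25, 0.35): after-tax cost of debt = 0.08 * (1 - 0.35) = 0.052,
# so WACC = 0.16 * 0.75 + 0.052 * 0.25 = 0.133.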
def _get_contents(self):
all_node_options = [
'every text node part/.style={align=center}'
]
debt_options = all_node_options + [
'fill=blue'
]
debt_text_options = all_node_options + [
'text=white'
]
equity_options = all_node_options + [
'fill=orange'
]
wacc_options = all_node_options + [
'fill=violet!80'
]
debt_equity_width = 3
total_height = 4
debt_height = self.weight_of_debt * total_height
equity_height = self.weight_of_equity * total_height
debt_contents = [
'Debt',
pl.OutputLineBreak(),
f'Pre-tax: {self.cost_of_debt:.2%}',
pl.OutputLineBreak(),
f'After: {self.after_tax_cost_of_debt:.2%}',
]
equity_contents = [
'Equity',
pl.OutputLineBreak(),
f'{self.cost_of_equity:.2%}'
]
wacc_contents = ['WACC', pl.OutputLineBreak(), f'{self.wacc:.2%}']
debt_rect = lg.Rectangle(debt_equity_width, debt_height, debt_contents, shape_options=debt_options, text_options=debt_text_options)
equity_rect = lg.Rectangle(debt_equity_width, equity_height, equity_contents, offset=(0, 2.15), shape_options=equity_options)
wacc_rect = lg.Rectangle(2, 4.3, wacc_contents, offset=(3, 1.5), shape_options=wacc_options)
contents = lg.TikZPicture(
[
pl.TextSize(-1),
debt_rect,
equity_rect,
wacc_rect,
lg.Arrow(debt_rect, wacc_rect),
lg.Arrow(equity_rect, wacc_rect)
]
)
return contents
class EquityDebtWACCGraphicForHalfAndHalf(pl.Template):
def __init__(self, cost_of_equity: float, cost_of_debt: float, weight_of_equity: float, weight_of_debt: float,
tax_rate: float):
self.cost_of_equity = cost_of_equity
self.cost_of_debt = cost_of_debt
self.weight_of_equity = weight_of_equity
self.weight_of_debt = weight_of_debt
self.tax_rate = tax_rate
self.contents = self._get_contents()
super().__init__()
@property
def wacc(self):
return self.cost_of_equity * self.weight_of_equity + self.after_tax_cost_of_debt * self.weight_of_debt
@property
def after_tax_cost_of_debt(self):
return self.cost_of_debt * (1 - self.tax_rate)
def _get_contents(self):
all_node_options = [
'every text node part/.style={align=center}'
]
debt_options = all_node_options + [
'fill=blue'
]
debt_text_options = all_node_options + [
'text=white'
]
equity_options = all_node_options + [
'fill=orange'
]
wacc_options = all_node_options + [
'fill=violet!80'
]
debt_equity_width = 3
total_height = 4
debt_height = self.weight_of_debt * total_height
equity_height = self.weight_of_equity * total_height
debt_contents = [
'Debt',
pl.OutputLineBreak(),
f'Pre-tax: {self.cost_of_debt:.2%}',
pl.OutputLineBreak(),
f'After: {self.after_tax_cost_of_debt:.2%}',
]
equity_contents = [
'Equity',
pl.OutputLineBreak(),
f'{self.cost_of_equity:.2%}'
]
wacc_contents = ['WACC', pl.OutputLineBreak(), f'{self.wacc:.2%}']
debt_rect = lg.Rectangle(debt_equity_width, debt_height, debt_contents, shape_options=debt_options,
text_options=debt_text_options)
equity_rect = lg.Rectangle(debt_equity_width, equity_height, equity_contents, offset=(0, debt_height),
shape_options=equity_options)
wacc_rect = lg.Rectangle(2, total_height, wacc_contents, offset=(3, 1), shape_options=wacc_options)
contents = lg.TikZPicture(
[
debt_rect,
equity_rect,
wacc_rect,
lg.Arrow(debt_rect, wacc_rect),
lg.Arrow(equity_rect, wacc_rect)
]
)
return contents |
py | b40678d6e6142c5ce139c5ab13bce7b7eed2edf6 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.cm
from scipy.signal.windows import gaussian
import sklearn.metrics
from DataSet import createDataSetFromFile
from Utils import getProjectPath
from Evaluation import getSpecificColorMap, plotMinErrors, plotAlongAxisErrors,\
plotMinErrorsSqueezed
def createTargetShapeDelayFigure():
gestureLen = 20
gestureSig = np.concatenate([np.zeros((10,3)),np.random.normal(size=(gestureLen,3))*np.atleast_2d(gaussian(20, 3, 0)*2).T,np.zeros((10,3))],0)
target = np.concatenate([np.zeros((10,1)),np.ones((gestureLen,1)),np.zeros((10,1))],0)
target_gaus = np.concatenate([np.zeros((5,1)),np.atleast_2d(gaussian(gestureLen+10,5)).T,np.zeros((5,1))],0)
target_delayed = np.concatenate([np.zeros((28,1)),np.ones((5,1)),np.zeros((7,1))],0)
fig, ax = plt.subplots(1, 3, sharey=True, sharex=True, figsize=(20,5))
plt.ylim(-5,5)
for axn in ax:
axn.plot(gestureSig,label='input signal')
axn.plot([0,40],[0,0],c='black',linewidth=1)
ax[0].plot(target,label='target',c='red',linewidth=2)
ax[0].fill_between(np.arange(0,40),0,target.squeeze(),facecolor='red',alpha=0.5)
ax[0].set_title('(a)')
ax[0].set_xlabel('timestep')
ax[1].plot(target_gaus,label='target',c='red',linewidth=2)
ax[1].fill_between(np.arange(0,40),0,target_gaus.squeeze(),facecolor='red',alpha=0.5)
ax[1].set_title('(b)')
ax[1].set_xlabel('timestep')
ax[2].plot(target_delayed,label='target',c='red',linewidth=2)
ax[2].fill_between(np.arange(0,40),0,target_delayed.squeeze(),facecolor='red',alpha=0.5)
ax[2].set_title('(c)')
ax[2].set_xlabel('timestep')
#plt.legend(bbox_to_anchor=(1., 1.05), loc=1, borderaxespad=0.)
plt.tight_layout()
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\targetShapeDelay2.pdf'
pp = PdfPages(projectPath)
pp.savefig()
pp.close()
def createEvaluationProblem():
gestureLen = 20
target = np.concatenate([np.ones((gestureLen+1,1)),np.zeros((9,1)),np.ones((gestureLen,1)),np.zeros((40,1))],0)
target2 = np.concatenate([np.zeros((70,1)),np.ones((gestureLen,1))],0)
pred1 = np.concatenate([np.ones((8,1)),np.zeros((5,1)),np.ones((8,1)),np.zeros((69,1))],0)
pred2 = np.concatenate([np.zeros((7,1)),np.ones((7,1)),np.zeros((66,1)),np.ones((10,1))],0)
zero = np.zeros((100,1))
plt.figure(figsize=(20,5))
#plt.plot(target, label='Target Gesture 1', color='red', linewidth=2, linestyle='--')
#plt.plot(pred1, label='Pred. Gesture 1', color='red', linewidth=2, linestyle='-')
#plt.plot(pred2, label='Pred. Gesture 2', color='blue', linewidth=2, linestyle='-')
#plt.fill_between(np.arange(0,70), 0, 1, label='Target Gesture 1', facecolor='red', alpha=0.2, where=np.squeeze(target>0))
#plt.fill_between(np.arange(0,70), 0, np.squeeze(pred1), label='Pred. Gesture 1', facecolor='red', where=np.squeeze(pred1>=pred2))
#plt.fill_between(np.arange(0,70), 0, np.squeeze(pred2), label='Pred. Gesture 2', facecolor='blue', where=np.squeeze(pred2>=pred1))
plt.plot(np.ones((90,1))*0.5,color='black')
plt.plot(np.ones((90,1))*1,color='black')
plt.plot(np.ones((90,1))*-0.5,color='black')
plt.plot(np.ones((90,1))*-1,color='black')
plt.fill_between(np.arange(0,90), 0.5, 1, label='no gesture', facecolor='grey', alpha=0.4)
plt.fill_between(np.arange(0,90), 0.5, 1, facecolor='red', alpha=0.8, where=np.squeeze(target>0))
plt.fill_between(np.arange(0,90), 0.5, 1, facecolor='blue', alpha=0.8, where=np.squeeze(target2>0))
plt.fill_between(np.arange(0,90), -0.5, -1, facecolor='grey', alpha=0.4)
plt.fill_between(np.arange(0,90), -0.5, -1, label='Gesture 1', facecolor='red', where=np.squeeze(pred1==1))
plt.fill_between(np.arange(0,90), -0.50, -1, label='Gesture 2', facecolor='blue', where=np.squeeze(pred2==1))
plt.fill_between(np.arange(0,90), -0.2, 0.2, facecolor='yellow', alpha=0.2)
plt.annotate('TP',xy=(3.5,-0.1))
plt.plot([3,10],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('WG',xy=(8,-0.1))
plt.plot([10,10],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('FP',xy=(14,-0.1))
plt.plot([17,10],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('TP',xy=(34,-0.1))
plt.plot([50,25],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('FN',xy=(46,-0.1))
plt.plot([50,40],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('TP',xy=(55.5,-0.1))
plt.plot([50,60],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('TP',xy=(83.5,-0.1))
plt.plot([85,80],[-0.75,0.75],linewidth=3, color='black')
ax = plt.gca()
ax.text( 2.5, -1.3,str(1),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text( 9.5, -1.3,str(2),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(15 , -1.3,str(3),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(50 , -1.3,str(4),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(84.5, -1.3,str(5),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(39.5, 1.2,str(6),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(59.5, 1.2,str(7),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
plt.xlabel('time step')
plt.yticks([-0.75,0,0.75])
plt.setp(plt.gca(), 'yticklabels', ['Prediction','Mapping','Target'])
plt.ylim(-1.5,1.5)
plt.xlim(0,120)
plt.legend()
plt.tight_layout()
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\classificationProb.pdf'
pp = PdfPages(projectPath)
pp.savefig()
pp.close()
true = [1,1,1,2,3,3,3]
pred = [1,2,3,2,1,3,3]
print sklearn.metrics.f1_score(true,pred,average=None)
print np.mean(sklearn.metrics.f1_score(true,pred,average=None))
def createInputSignalFigure():
errors = [0.272813277233,0.233033147087,0.217966453407,0.139282580674,0.0953774246893,0.0898370698925,0.0551168200035]
labels = ['F','G','A','FG','FA','GA','FGA']
ax = plt.subplot()
#ax.bar(np.arange(0,7), errors, alpha=0.5)
cmap = matplotlib.cm.brg_r
for i, error in enumerate(errors):
ax.bar([i], errors[i], facecolor=cmap(error/0.5), alpha=1)
ax.set_xticks(np.arange(0.5,7.5,1))
ax.set_xticklabels(labels)
plt.ylabel('Validation Error')
plt.xlabel('Input signal')
plt.xlim(-0.5,7.5)
plt.ylim(0,0.5)
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\errorByInput.pdf'
pp = PdfPages(projectPath)
pp.savefig()
pp.close()
return ax
def createGroundTruthCreation():
ds = createDataSetFromFile('julian_0_fullSet.npz')
def bla():
vals = np.array([0.8867924528301887,
0.85238095238095235,
0.89047619047619042,
0.8418604651162791,
0.89622641509433965,
0.875,
0.86301369863013699,
0.82027649769585254,
0.83783783783783783,
0.90094339622641506,
0.75,
0.74568965517241381,
0.76855895196506552,
0.78240740740740744,
0.76923076923076927,
0.85308056872037918,
0.85915492957746475,
0.87019230769230771,
0.86976744186046506,
0.82938388625592419,
0.90047393364928907,
0.83257918552036203,
0.80888888888888888,
0.89671361502347413,
0.86915887850467288,
0.78026905829596416,
0.76211453744493396,
0.76956521739130435,
0.73931623931623935,
0.75107296137339052,
0.90476190476190477,
0.84931506849315064,
0.89099526066350709,
0.83486238532110091,
0.84722222222222221,
0.86098654708520184,
0.87441860465116283,
0.8545454545454545,
0.85849056603773588,
0.88732394366197187,
0.74889867841409696,
0.79824561403508776,
0.82949308755760365,
0.77253218884120167,
0.77876106194690264])
np.set_printoptions(precision=3)
for i in range(9):
print i
print str( "{0:.3f}".format(np.mean(vals[i*5:i*5+5]) )) + " (" + str("{0:.2f}".format(np.std(vals[i*5:i*5+5]))) + ")"
print
def evaluateNPZ(npzFile):
pp = PdfPages(getProjectPath()+"error_space_"+npzFile+".pdf")
a = np.load(getProjectPath()+npzFile)
plotMinErrors(a['errors'], a['params'], a['paraRanges'], pp, getSpecificColorMap())
i = 0
inputSignalAxis = -1
inputScalingAxis = -1
normAxis = -1
for node, param in a['params']:
if param == 'spectral_radius':
inputSignalAxis = i
elif param == 'output_dim':
inputScalingAxis = i
elif param == 'ridge_param':
normAxis = i
i =i+1
plotAlongAxisErrors(a['errors'], a['params'], a['paraRanges'], normAxis, inputSignalAxis, inputScalingAxis, pp, getSpecificColorMap())
pp.close()
#plt.close('all')
def plotErrorResSize():
matplotlib.rcParams.update({'font.size': 25})
npzFile = '2016-04-28-09-57_bigRunOnlySnap.npz'
npz2 = '2016-04-28-15-18_bigRunOnlySnap.npz'
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\errorResSize.pdf'
pp = PdfPages(projectPath)
a = np.load(getProjectPath()+npzFile)
errors = a['errors']
errors = np.mean(errors,2).squeeze()
b = np.load(getProjectPath()+npz2)
errors2 = b['errors']
errors2 = np.mean(errors2,2).squeeze()
plt.figure(figsize=(10,7.5))
plt.plot(errors, 'o', linestyle='-', linewidth=3, label='ridge para = 0.01')
#plt.plot(errors2, 'o', linestyle='-', linewidth=3, label='ridge para = 0.1')
plt.grid()
plt.minorticks_on()
plt.grid(which='minor', axis='y')
plt.xlabel('Reservoir size')
ticks = np.arange(0, 8)
labels = [25,50,100,200,400,800,1600,3200]
plt.xticks(ticks, labels)
plt.ylabel('Validation error')
plt.ylim(0,1)
plt.tight_layout()
pp.savefig()
pp.close()
#plt.close('all')
if __name__ == '__main__':
matplotlib.rcParams.update({'font.size': 20})
createGroundTruthCreation()
|
py | b40679debd80cbceb789f022857efc3d98067435 | from dynamic_preferences.types import StringPreference, IntegerPreference, BooleanPreference
from dynamic_preferences.registries import global_preferences_registry
from dynamic_preferences.preferences import Section
from django.conf import settings
general_section = Section('general')
homepage_section = Section('homepage')
@global_preferences_registry.register
class SiteTitle(StringPreference):
section = general_section
name = 'admin_title'
verbose_name = 'Admin Site Title'
default = 'Global Trade Motors'
@global_preferences_registry.register
class SiteHeader(StringPreference):
section = general_section
name = 'admin_header'
verbose_name = 'Admin Site Header'
default = 'Global Trade Motors'
@global_preferences_registry.register
class NumberOfVehiclesOnHompage(IntegerPreference):
section = homepage_section
name = 'number_of_vehicles'
verbose_name = 'Homepage Vehicles'
help_text = 'Please enter the number of vehicles to show on homepage.'
default = 16
@global_preferences_registry.register
class DefaultEmailAddress(StringPreference):
section = general_section
name = 'default_email'
verbose_name = 'Default Email Address'
help_text = 'Please enter the email address to show on the top header \
and other pages.'
default = '[email protected]'
if settings.DEFAULT_EMAIL_ADDRESS:
default = settings.DEFAULT_EMAIL_ADDRESS
@global_preferences_registry.register
class LiveChatFeature(BooleanPreference):
section = general_section
name = 'live_chat'
verbose_name = 'Live Chat'
help_text = 'Turn Live Chat feature on/off.'
default = False
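# Usage note (added; assumes the standard dynamic_preferences manager API):
# registered values are read back with '<section>__<name>' keys, e.g.
#   prefs = global_preferences_registry.manager()
#   prefs['general__admin_title']          # -> 'Global Trade Motors'
#   prefs['homepage__number_of_vehicles']  # -> 16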
|
py | b4067a16f3ff96aab1b49ba6604a1cbcc689873e | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class CrawlerType1Item(Item):
# define the fields for your item here like:
# name = scrapy.Field()
text = Field()
heading = Field()
img = Field()
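# Example of populating the item in a spider callback (illustrative values only):
#   item = CrawlerType1Item()
#   item['heading'] = 'Some headline'
#   item['text'] = 'Paragraph text scraped from the page'
#   item['img'] = 'http://example.com/image.jpg'
#   yield item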
|
py | b4067a7c98c0e411f1155319c0bd1dc3c42bcc08 | # -*- coding: utf-8 -*-
from django.test import TestCase
from hipster_api import fields
class FiledIntTestCase(TestCase):
def get_value(self, obj):
obj.to_python()
obj.to_rules(None)
return obj.value
def test_field_int(self):
obj = fields.Integer(default=0)
obj.setitem('123')
self.assertEqual(self.get_value(obj), 123)
obj.setitem(1234)
self.assertEqual(self.get_value(obj), 1234)
obj.setitem(-23)
self.assertEqual(self.get_value(obj), -23)
obj.setitem('asd123')
self.assertEqual(self.get_value(obj), 0)
def test_field_int_less(self):
obj = fields.IntegerLess(default=0, less=5)
obj.setitem('123')
self.assertEqual(self.get_value(obj), 0)
obj.setitem(2)
self.assertEqual(self.get_value(obj), 2)
obj.setitem(-23)
self.assertEqual(self.get_value(obj), -23)
obj.setitem('asd123')
self.assertEqual(self.get_value(obj), 0)
def test_field_int_larger(self):
obj = fields.IntegerLarger(default=0, larger=5)
obj.setitem('123')
self.assertEqual(self.get_value(obj), 123)
obj.setitem(2)
self.assertEqual(self.get_value(obj), 0)
obj.setitem(-23)
self.assertEqual(self.get_value(obj), 0)
obj.setitem('asd123')
self.assertEqual(self.get_value(obj), 0)
def test_field_int_list(self):
obj = fields.IntegerList(default='')
self.assertListEqual(self.get_value(obj), [])
obj.setitem('123,2,6')
self.assertListEqual(self.get_value(obj), [123, 2, 6])
obj.setitem('123, asdf, 2,6')
self.assertListEqual(self.get_value(obj), [])
|
py | b4067ad10b7ed171364fff7125a54b83f8410f4f | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 2 11:25:58 2019
@author: bjwil
"""
import networkx as nx
def eulerian_path(edge_dict):
'''Generates an Eulerian path (or cycle, if one exists) from the given edges.'''
G = nx.DiGraph(edge_dict)
if not(nx.is_eulerian(G)):
out_degrees = G.out_degree([node for node in G])
in_degrees = G.in_degree([node for node in G])
ds = [out_degrees, in_degrees]
d = {}
for k in out_degrees.keys():
d[k] = tuple(d[k] for d in ds)
for key in d:
d[key] = d[key][0] - d[key][1]
extra_out = [key for (key, value) in d.items() if value == 1][0]
extra_in = [key for (key, value) in d.items() if value == -1][0]
edge_dict.setdefault(extra_in, []).append(extra_out)  # keep the list-of-successors format
current_node = extra_out
else:
current_node = next(iter(edge_dict.keys()))
path = [current_node]
# Get the initial cycle.
while True:
path.append(edge_dict[current_node][0])
if len(edge_dict[current_node]) == 1:
del edge_dict[current_node]
else:
edge_dict[current_node] = edge_dict[current_node][1:]
if path[-1] in edge_dict:
current_node = path[-1]
else:
break
# Continually expand the initial cycle until we're out of edge_dict.
while len(edge_dict) > 0:
for i in range(len(path)):
if path[i] in edge_dict:
current_node = path[i]
cycle = [current_node]
while True:
cycle.append(edge_dict[current_node][0])
if len(edge_dict[current_node]) == 1:
del edge_dict[current_node]
else:
edge_dict[current_node] = edge_dict[current_node][1:]
if cycle[-1] in edge_dict:
current_node = cycle[-1]
else:
break
path = path[:i] + cycle + path[i+1:]
break
return path
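# Illustrative usage (added comment): the function expects an adjacency dict
# mapping each node to a list of successor nodes, as built in __main__ below, e.g.
#   edges = {'0': ['1'], '1': ['2'], '2': ['0', '3'], '3': ['2']}
#   print('->'.join(eulerian_path(edges)))
# Note that the dict is consumed (mutated) while the path is built, so pass a copy
# if the original edge map is still needed afterwards.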
if __name__ == '__main__':
# Read the input data.
with open ('last.txt', 'r') as in_file:
lines = in_file.read().split('\n')
edges = {}
for connection in lines:
connection = connection.replace(" ", "")
edges[connection.split('->')[0]] = [v for v in connection.split('->')[1].split(',')]
# Get the Eulerian cycle.
path = eulerian_path(edges)
# Print and save the answer.
print('->'.join(map(str,path)))
with open('Output9.txt', 'w') as output_data:
output_data.write('->'.join(map(str,path))) |
py | b4067b0aa08f7801c34fb1b4afd81fed0392a8d6 | from board import Board
def choose_move(data: dict) -> str:
board:Board = Board(data)
move = board.chose_direction(board.you)
print(f"{data['game']['id']} MOVE {data['turn']}: {move} picked")
return move
|
py | b4067b4a6065ff4f99c8740c57875fe8969b4fac | """Snake, classic arcade game.
Exercises
1. How do you make the SnakeFast or SnakeSlow classes? (A sketch follows the Snake class below.)
2. How do you make a SnakeSmart that changes direction when it collides with the edges?
3. How would you make new food types that make the snake faster or shrink it when eaten? (A sketch follows the Food class below.)
4. How do you create an Actor class that will be the Head and Food superclass?
"""
from turtle import setup, hideturtle, tracer, listen, onkey, done, update, clear, ontimer
from random import randrange, choice
from freegames import square, vector
class Head:
def __init__(self, x, y):
self.position = vector(x, y)
@property
def x(self):
return self.position.x
@property
def y(self):
return self.position.y
class Food:
color = 'Blue'
cal = 1
def __init__(self, x, y):
self.position = vector(x, y)
@property
def x(self):
return self.position.x
@property
def y(self):
return self.position.y
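# Sketch for exercise 3 above (added; class names and values are illustrative):
# a new food type only needs its own color and cal. Snake.eat below grows the
# body for positive cal and shrinks it for negative cal; a speed-changing food
# would additionally have to adjust the snake's SPEED/aim when eaten.
class SuperFood(Food):
    color = 'Green'
    cal = 3

class PoisonFood(Food):
    color = 'Purple'
    cal = -2
# To put them in play, add the classes to the foods list in GameSnake.new_food.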
class Snake:
SPEED = 1
def __init__(self, x=0, y=0):
self.head = Head(x, y)
self.body = [vector(10, 0)]
self.aim = vector(0*self.SPEED, -10*self.SPEED)
self.direction = "SOUTH"
self.status = 'LIVE'
def eat(self, food):
print('snake is eating', food.cal)
for x in range(food.cal):  # positive cal: grow by cal segments
self.body.append(self.head.position)
for x in range(food.cal, 0):  # negative cal: shrink (this range is empty when cal > 0)
del self.body[0]
def move(self):
"Move snake forward one segment."
self.head = Head(*self.body[-1].copy())
self.head.position.move(self.aim)
if self.is_colliding_with_border():
self.on_collision_with_border()
elif self.is_eating_himself():
self.on_eating_himself()
else:
self.body.append(self.head.position)
self.body.pop(0) # cut the tail
def on_collision_with_border(self):
self.dead()
def on_eating_himself(self):
self.dead()
def is_eating_himself(self):
return (self.head.position in self.body)
def dead(self):
self.status = 'DEAD'
def alive(self):
return self.status != 'DEAD'
def is_colliding_with_border(self):
return not(-200 < self.head.x < 190 and -200 < self.head.y < 190)
def left(self):
if self.direction == "NORTH" :
self.aim.x = -10*self.SPEED
self.aim.y = 0*self.SPEED
elif self.direction == "SOUTH":
self.aim.x = 10*self.SPEED
self.aim.y = 0*self.SPEED
elif self.direction == "WEST" :
self.aim.x = 0*self.SPEED
self.aim.y = -10*self.SPEED
elif self.direction == "EAST":
self.aim.x = 0*self.SPEED
self.aim.y = 10*self.SPEED
def right(self):
if self.direction == "NORTH" :
self.aim.x = 10*self.SPEED
self.aim.y = 0*self.SPEED
elif self.direction == "SOUTH":
self.aim.x = -10*self.SPEED
self.aim.y = 0*self.SPEED
elif self.direction == "WEST" :
self.aim.x = 0*self.SPEED
self.aim.y = 10*self.SPEED
elif self.direction == "EAST":
self.aim.x = 0*self.SPEED
self.aim.y = -10*self.SPEED
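# Sketch for exercise 1 above (added; an assumption about the intended design):
# move/left/right all scale the aim vector by self.SPEED, so a speed variant only
# needs to override that class attribute.
class SnakeFast(Snake):
    SPEED = 2  # the head advances 20 px per tick instead of 10
    # Note: a faster snake can step over food placed on the 10 px grid; matching
    # food placement to SPEED is left as part of the exercise.

# A SnakeSlow is trickier: SPEED below 1 would leave the 10 px grid that food
# spawns on, so slowing the ontimer interval in GameSnake.run is the simpler route.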
class GameSnake:
def __init__(self):
self.food = self.new_food()
self.snake = Snake()
onkey(lambda: self.on_rightkeypressed() , 'Right')
onkey(lambda: self.on_leftkeypressed(), 'Left')
onkey(lambda: self.on_upkeypressed(), 'Up')
onkey(lambda: self.on_downkeypressed(), 'Down')
def on_rightkeypressed(self):
if self.snake.direction == 'NORTH':
self.snake.right()
elif self.snake.direction == "SOUTH":
self.snake.left()
self.snake.direction = "EAST"
def on_leftkeypressed(self):
if self.snake.direction == 'NORTH':
self.snake.left()
elif self.snake.direction == "SOUTH":
self.snake.right()
self.snake.direction = "WEST"
def on_upkeypressed(self):
if self.snake.direction == 'WEST':
self.snake.right()
elif self.snake.direction == "EAST":
self.snake.left()
self.snake.direction = "NORTH"
def on_downkeypressed (self):
if self.snake.direction == 'WEST':
self.snake.left()
elif self.snake.direction == "EAST":
self.snake.right()
self.snake.direction = "SOUTH"
def new_food(self):
foods = [Food]
type_food = choice(foods)
food = type_food(0, 0)
food.position = vector(randrange(-15, 15) * 10, randrange(-15, 15) * 10)
return food
def run(self):
clear()
for body in self.snake.body:
square(body.x, body.y, 9, 'black')
square(self.food.x, self.food.y, 9, self.food.color)
update()
self.snake.move()
if self.snake.head.position == self.food.position:
self.snake.eat(self.food)
self.food = self.new_food()
if self.snake.alive():
ontimer(self.run, 100)
else:
print('>>> SNAKE IS DEAD <<<')
square(self.snake.head.x, self.snake.head.y, 9, 'red')
return
def init():
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
listen()
game = GameSnake()
game.run()
done()
if __name__ == '__main__':
init() |
py | b4067c64995ebacdbd7024bad4674ac5a1401703 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def default_conv(in_channels, out_channels, kernel_size, bias=True, dilation=1):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias, dilation=dilation)
class ChannelZeroPad(nn.Module):
def __init__(self, prepadding=1, postpadding=0, value=0):
super(ChannelZeroPad, self).__init__()
self.prepadding = prepadding
self.postpadding = postpadding
self.value = 0
def forward(self, input):
return F.pad(input, (0, 0, 0, 0, self.prepadding, self.postpadding))
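# Note (added comment): for an NCHW tensor, F.pad(x, (0, 0, 0, 0, pre, post)) pads
# the channel dimension, so ChannelZeroPad(1, 0) maps (N, C, H, W) to (N, C + 1, H, W)
# with one zero channel prepended. MyUpsampler below relies on this so that one of
# the upscale_factor**2 sub-pixel channels carries the pre-interpolated input unchanged.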
class MyUpsampler(nn.Module):
def __init__(self, conv, upscale_factor, n_feats, bias=True):
super(MyUpsampler, self).__init__()
self.upscale_factor = upscale_factor
self.conv1 = conv(n_feats, n_feats // 2, 3, bias)
self.conv2 = conv(n_feats // 2, self.upscale_factor ** 2 - 1, 3, bias)
self.ChannelZeroPad = ChannelZeroPad(1, 0, 0)
self.positionupscale = nn.PixelShuffle(self.upscale_factor)
self.relu = nn.ReLU(True)
def forward(self, x, preintp_x):
x = self.relu(self.conv1(x))
x = self.conv2(x)
x = self.ChannelZeroPad(x)
x += preintp_x.repeat(1, self.upscale_factor**2, 1, 1)
x = self.positionupscale(x)
return x |
py | b4067e5ce3ae0566cde77fcade345f29b2c40b96 | from .cart import *
def cart(request):
return {'cart': Cart(request)}
def cart1(request):
return {'cart1': Cart1(request)}
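# Note (added; the module path is an assumption based on this file's imports): these
# processors only take effect once listed in settings.py under
# TEMPLATES[0]['OPTIONS']['context_processors'], e.g.
#   'cart.context_processors.cart' and 'cart.context_processors.cart1'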
|
py | b4067fdad7574aa11a471c9a712137aadd520eae | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for standard operations on URIs of different kinds."""
from __future__ import print_function
import re
import sys
import urllib
import urllib2
from chromite.lib.paygen import filelib
from chromite.lib.paygen import gslib
# This module allows files from different storage types to be handled
# in a common way, for supported operations.
PROTOCOL_GS = gslib.PROTOCOL
PROTOCOL_HTTP = 'http'
PROTOCOL_HTTPS = 'https'
PROTOCOLS = (PROTOCOL_GS,
PROTOCOL_HTTP,
PROTOCOL_HTTPS)
PROTOCOL_SEP = '://'
EXTRACT_PROTOCOL_RE = re.compile(r'^(\w+)%s' % PROTOCOL_SEP)
SPLIT_URI_RE = re.compile(r'^(\w+)%s(.*)$' % PROTOCOL_SEP)
TYPE_GS = PROTOCOL_GS
TYPE_HTTP = PROTOCOL_HTTP
TYPE_HTTPS = PROTOCOL_HTTPS
TYPE_LOCAL = 'file'
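# Illustrative examples (added comment):
#   GetUriType('gs://bucket/path')       -> 'gs'
#   GetUriType('/some/local/path')       -> 'file' (TYPE_LOCAL)
#   Copy('gs://bucket/obj', '/tmp/obj')  # GS <-> local copies go through gslib
#   Copy('http://host/f', '/tmp/f')      # HTTP -> local downloads via URLRetrieve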
class NotSupportedForType(RuntimeError):
"""Raised when operation is not supported for a particular file type"""
def __init__(self, uri_type, extra_msg=None):
# pylint: disable=protected-access
function = sys._getframe(1).f_code.co_name
msg = 'Function %s not supported for %s URIs' % (function, uri_type)
if extra_msg:
msg += ', ' + extra_msg
RuntimeError.__init__(self, msg)
class NotSupportedForTypes(RuntimeError):
"""Raised when operation is not supported for all particular file type"""
def __init__(self, extra_msg=None, *uri_types):
# pylint: disable=protected-access
function = sys._getframe(1).f_code.co_name
msg = ('Function %s not supported for set of URIs with types: %s' %
(function, ', '.join(uri_types)))
if extra_msg:
msg += ', ' + extra_msg
RuntimeError.__init__(self, msg)
class NotSupportedBetweenTypes(RuntimeError):
"""Raised when operation is not supported between particular file types"""
def __init__(self, uri_type1, uri_type2, extra_msg=None):
# pylint: disable=protected-access
function = sys._getframe(1).f_code.co_name
msg = ('Function %s not supported between %s and %s URIs' %
(function, uri_type1, uri_type2))
if extra_msg:
msg += ', ' + extra_msg
RuntimeError.__init__(self, msg)
class MissingURLError(RuntimeError):
"""Raised when nothing exists at URL."""
def ExtractProtocol(uri):
"""Take a URI and return the protocol it is using, if any.
Examples:
'gs://some/path' ==> 'gs'
'file:///some/path' ==> 'file'
'/some/path' ==> None
'/cns/some/colossus/path' ==> None
Args:
uri: The URI to get protocol from.
Returns:
Protocol string that is found, or None.
"""
match = EXTRACT_PROTOCOL_RE.search(uri)
if match:
return match.group(1)
return None
def GetUriType(uri):
"""Get the type of a URI.
See the TYPE_* constants for examples. This is mostly based
on URI protocols, with Colossus and local files as exceptions.
Args:
uri: The URI to consider
Returns:
The URI type.
"""
protocol = ExtractProtocol(uri)
if protocol:
return protocol
return TYPE_LOCAL
def SplitURI(uri):
"""Get the protocol and path from a URI
Examples:
'gs://some/path' ==> ('gs', 'some/path')
'file:///some/path' ==> ('file', '/some/path')
'/some/path' ==> (None, '/some/path')
'/cns/some/colossus/path' ==> (None, '/cns/some/colossus/path')
Args:
uri: The uri to get protocol and path from.
Returns:
Tuple (protocol, path)
"""
match = SPLIT_URI_RE.search(uri)
if match:
return (match.group(1), match.group(2))
return (None, uri)
def IsGsURI(uri):
"""Returns True if given uri uses Google Storage protocol."""
return PROTOCOL_GS == ExtractProtocol(uri)
def IsFileURI(uri):
"""Return True if given uri is a file URI (or path).
If uri uses the file protocol or it is a plain non-Colossus path
then return True.
Args:
uri: Any URI or path.
Returns:
True or False as described above.
"""
return TYPE_LOCAL == GetUriType(uri)
def IsHttpURI(uri, https_ok=False):
"""Returns True if given uri uses http, or optionally https, protocol.
Args:
uri: The URI to check.
https_ok: If True, then accept https protocol as well.
Returns:
Boolean
"""
uri_type = GetUriType(uri)
return TYPE_HTTP == uri_type or (https_ok and TYPE_HTTPS == uri_type)
def IsHttpsURI(uri):
"""Returns True if given uri uses https protocol."""
return TYPE_HTTPS == GetUriType(uri)
def MD5Sum(uri):
"""Compute or retrieve MD5 sum of uri.
Supported for: local files, GS files.
Args:
uri: The /unix/path or gs:// uri to compute the md5sum on.
Returns:
A string representing the md5sum of the file/uri passed in.
None if we do not understand the uri passed in or cannot compute
the md5sum.
"""
uri_type = GetUriType(uri)
if uri_type == TYPE_LOCAL:
return filelib.MD5Sum(uri)
elif uri_type == TYPE_GS:
try:
return gslib.MD5Sum(uri)
except gslib.GSLibError:
return None
# Colossus does not have a command for getting MD5 sum. We could
# copy the file to local disk and calculate it, but it seems better
# to explicitly say it is not supported.
raise NotSupportedForType(uri_type)
def Cmp(uri1, uri2):
"""Return True if paths hold identical files.
If either file is missing then always return False.
Args:
uri1: URI to a file.
uri2: URI to a file.
Returns:
True if files are the same, False otherwise.
Raises:
NotSupportedBetweenTypes if Cmp cannot be done between the two
URIs provided.
"""
uri_type1 = GetUriType(uri1)
uri_type2 = GetUriType(uri2)
uri_types = set([uri_type1, uri_type2])
if TYPE_GS in uri_types:
# GS only supported between other GS files or local files.
if len(uri_types) == 1 or TYPE_LOCAL in uri_types:
return gslib.Cmp(uri1, uri2)
if TYPE_LOCAL in uri_types and len(uri_types) == 1:
return filelib.Cmp(uri1, uri2)
raise NotSupportedBetweenTypes(uri_type1, uri_type2)
class URLopener(urllib.FancyURLopener):
"""URLopener that will actually complain when download fails."""
# The urllib.urlretrieve function, which seems like a good fit for this,
# does not give access to error code.
def http_error_default(self, *args, **kwargs):
urllib.URLopener.http_error_default(self, *args, **kwargs)
def URLRetrieve(src_url, dest_path):
"""Download file from given URL to given local file path.
Args:
src_url: URL to download from.
dest_path: Path to download to.
Raises:
MissingURLError if URL cannot be downloaded.
"""
opener = URLopener()
try:
opener.retrieve(src_url, dest_path)
except IOError as e:
# If the domain is valid but download failed errno shows up as None.
if e.errno is None:
raise MissingURLError('Unable to download %s' % src_url)
# If the domain is invalid the errno shows up as 'socket error', weirdly.
try:
int(e.errno)
# This means there was some normal error writing to the dest_path.
raise
except ValueError:
raise MissingURLError('Unable to download %s (bad domain?)' % src_url)
def Copy(src_uri, dest_uri):
"""Copy one uri to another.
Args:
src_uri: URI to copy from.
dest_uri: Path to copy to.
Raises:
NotSupportedBetweenTypes if Cmp cannot be done between the two
URIs provided.
"""
uri_type1 = GetUriType(src_uri)
uri_type2 = GetUriType(dest_uri)
uri_types = set([uri_type1, uri_type2])
if TYPE_GS in uri_types:
# GS only supported between other GS files or local files.
if len(uri_types) == 1 or TYPE_LOCAL in uri_types:
return gslib.Copy(src_uri, dest_uri)
if TYPE_LOCAL in uri_types and len(uri_types) == 1:
return filelib.Copy(src_uri, dest_uri)
if uri_type1 in (TYPE_HTTP, TYPE_HTTPS) and uri_type2 == TYPE_LOCAL:
# Download file from URL.
return URLRetrieve(src_uri, dest_uri)
raise NotSupportedBetweenTypes(uri_type1, uri_type2)
def Remove(*args, **kwargs):
"""Delete the file(s) at uris, or directory(s) with recurse set.
Args:
args: One or more URIs.
ignore_no_match: If True, then do not complain if anything was not
removed because no URI match was found. Like rm -f. Defaults to False.
recurse: Remove recursively starting at path. Same as rm -R. Defaults
to False.
"""
uri_types = set([GetUriType(u) for u in args])
if TYPE_GS in uri_types:
# GS support only allows local files among list.
if len(uri_types) == 1 or (TYPE_LOCAL in uri_types and len(uri_types) == 2):
return gslib.Remove(*args, **kwargs)
if TYPE_LOCAL in uri_types and len(uri_types) == 1:
return filelib.Remove(*args, **kwargs)
raise NotSupportedForTypes(*list(uri_types))
def Size(uri):
"""Return size of file at URI in bytes.
Args:
uri: URI to consider
Returns:
Size of file at given URI in bytes.
Raises:
MissingURLError if uri is a URL and cannot be found.
"""
uri_type = GetUriType(uri)
if TYPE_GS == uri_type:
return gslib.FileSize(uri)
if TYPE_LOCAL == uri_type:
return filelib.Size(uri)
if TYPE_HTTP == uri_type or TYPE_HTTPS == uri_type:
try:
response = urllib2.urlopen(uri)
if response.getcode() == 200:
return int(response.headers.getheader('Content-Length'))
except urllib2.HTTPError as e:
# Interpret 4** errors as our own MissingURLError.
if e.code < 400 or e.code >= 500:
raise
raise MissingURLError('No such file at URL %s' % uri)
raise NotSupportedForType(uri_type)
def Exists(uri, as_dir=False):
"""Return True if file exists at given URI.
If URI is a directory and as_dir is False then this will return False.
Args:
uri: URI to consider
as_dir: If True then check URI as a directory, otherwise check as a file.
Returns:
True if file (or directory) exists at URI, False otherwise.
"""
uri_type = GetUriType(uri)
if TYPE_GS == uri_type:
if as_dir:
# GS does not contain directories.
return False
return gslib.Exists(uri)
if TYPE_LOCAL == uri_type:
return filelib.Exists(uri, as_dir=as_dir)
if TYPE_HTTP == uri_type or TYPE_HTTPS == uri_type:
if as_dir:
raise NotSupportedForType(uri_type, extra_msg='with as_dir=True')
try:
response = urllib2.urlopen(uri)
return response.getcode() == 200
except urllib2.HTTPError:
return False
raise NotSupportedForType(uri_type)
def ListFiles(root_path, recurse=False, filepattern=None, sort=False):
"""Return list of file paths under given root path.
Directories are intentionally excluded from results. The root_path
argument can be a local directory path, a Google storage directory URI,
or a Colossus (/cns) directory path.
Args:
root_path: A local path, CNS path, or GS path to directory.
recurse: Look for files in subdirectories, as well
filepattern: glob pattern to match against basename of file
sort: If True then do a default sort on paths
Returns:
List of paths to files that matched
"""
uri_type = GetUriType(root_path)
if TYPE_GS == uri_type:
return gslib.ListFiles(root_path, recurse=recurse,
filepattern=filepattern, sort=sort)
if TYPE_LOCAL == uri_type:
return filelib.ListFiles(root_path, recurse=recurse,
filepattern=filepattern, sort=sort)
raise NotSupportedForType(uri_type)
def CopyFiles(src_dir, dst_dir):
"""Recursively copy all files from src_dir into dst_dir
This leverages the Copy method, so the restrictions there for what
copies are supported apply here.
Args:
src_dir: A local, CNS, or GS directory to copy from.
dst_dir: A local, CNS, or GS directory to copy into.
Returns:
A list of absolute path files for all copied files.
"""
dst_paths = []
src_paths = ListFiles(src_dir, recurse=True)
for src_path in src_paths:
dst_path = src_path.replace(src_dir, dst_dir)
Copy(src_path, dst_path)
dst_paths.append(dst_path)
return dst_paths
def RemoveDirContents(base_dir):
"""Remove all contents of a directory.
Args:
base_dir: directory to delete contents of.
"""
uri_type = GetUriType(base_dir)
if TYPE_GS == uri_type:
return gslib.RemoveDirContents(base_dir)
if TYPE_LOCAL == uri_type:
return filelib.RemoveDirContents(base_dir)
raise NotSupportedForType(uri_type)
|
py | b4067ff8d39e85d93218f5a92b0d4ea89da282d6 | import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from encoder import Encoder
from decoder import Decoder
from fc_decoder import FCDecoder
from vae import VAE
from vae import latent_loss
from data import FSPeptide
from data import UnlabeledContact
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Setup experiment.')
parser.add_argument('--input_size', type=int, default=441, help='flattened image size.')
parser.add_argument('--latent_size', type=int, default=3, help='latent dimension')
parser.add_argument('--batch_size', type=int, default=1, help='batch size for net')
parser.add_argument('--use_cuda', type=bool, default=True, help='Whether to use cuda.')
parser.add_argument('--model_path', type=str, default='/home/ygx/molecules/molecules/variational_autoencoder/save_points/saves_latent3/',
help='Path to saved model weights.')
parser.add_argument('--model_name', type=str, default='epoch90.pt', help='name of the saved model')
parser.add_argument('--latent_save_path', type=str,
default='/home/ygx/molecules/molecules/variational_autoencoder/generate_latent/fs_latent3_epoch90/',
help='path to save generated latent dimensions')
parser.add_argument('--recon_save_path', type=str,
default='/home/ygx/molecules/molecules/variational_autoencoder/generate_recon/fs_latent3_epoch90/',
help='path to save reconstructed images')
args = parser.parse_args()
def main():
"""
Generate images from a saved model
"""
train_data = UnlabeledContact(data='/home/ygx/data/fspeptide/fs_peptide.npy')
print('Number of samples: {}'.format(len(train_data)))
trainloader = DataLoader(train_data, batch_size=args.batch_size)
encoder = Encoder(input_size=args.input_size, latent_size=args.latent_size)
decoder = Decoder(latent_size=args.latent_size, output_size=args.input_size)
vae = VAE(encoder, decoder, use_cuda=args.use_cuda)
# Load saved model
vae.load_state_dict(torch.load(args.model_path + args.model_name))
if args.use_cuda:
encoder = encoder.cuda()
decoder = decoder.cuda()
vae = vae.cuda()
latent_arrys = []
recon_arrys = []
for batch_idx, data in enumerate(trainloader):
inputs = data['cont_matrix']
inputs = inputs.resize_(args.batch_size, 1, 21, 21)
inputs = inputs.float()
if args.use_cuda:
inputs = inputs.cuda()
inputs = Variable(inputs)
latent_array = encoder(inputs).data.cpu().numpy()
#print('latent_array has shape {}'.format(latent_array.shape))
latent_arrys.append(latent_array)
reconstructed_array = vae(inputs).data.cpu().numpy()
recon_arrys.append(reconstructed_array)
if batch_idx % 100 == 0:
print('Saving progress: {:.3f}%'.format(batch_idx * 100. / len(trainloader)))
print('\nNumber of images prepared: {}'.format(len(latent_arrys)))
latent_stacked = np.stack(latent_arrys, axis=0)
latent_filename = 'latent_imgs'
np.save(args.latent_save_path + latent_filename, latent_stacked)
recon_stacked = np.stack(recon_arrys, axis=0)
recon_filename = 'recon_imgs'
np.save(args.recon_save_path + recon_filename, recon_stacked)
if __name__=='__main__':
main()
|
py | b4068003e58d594120f5de133c1794db7330f12a | from abc import ABC, abstractmethod
from striatum import make_logger
import gym
logger = make_logger(__file__)
class Env(gym.Env):
...
class Policy(ABC):
@abstractmethod
def update(self, reward): pass
@abstractmethod
def sample(self, observation): pass
def sample_and_update(self, reward, observation):
self.update(reward)
return self.sample(observation)
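# Minimal illustrative subclass (added; not part of the original module): a policy
# that ignores observations and never learns, just to show the required interface.
class ConstantPolicy(Policy):
    def __init__(self, action):
        self.action = action

    def update(self, reward):
        pass  # nothing to learn for a constant policy

    def sample(self, observation):
        return self.action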
|
py | b40681ff853ad32ab4f938e26f610699a4462898 | from datetime import date as d, timedelta
from time import strptime
from testfixtures import ShouldRaise, test_date, replace, compare
from testfixtures.tests import sample1, sample2
from unittest import TestCase
class TestDate(TestCase):
# NB: Only the today method is currently stubbed out,
# if you need other methods, tests and patches
# gratefully received!
@replace('datetime.date', test_date())
def test_today(self):
from datetime import date
compare(date.today(), d(2001, 1, 1))
compare(date.today(), d(2001, 1, 2))
compare(date.today(), d(2001, 1, 4))
@replace('datetime.date', test_date(2001, 2, 3))
def test_today_supplied(self):
from datetime import date
compare(date.today(), d(2001, 2, 3))
@replace('datetime.date', test_date(year=2001, month=2, day=3))
def test_today_all_kw(self):
from datetime import date
compare(date.today(), d(2001, 2, 3))
@replace('datetime.date', test_date(None))
def test_today_sequence(self, t):
t.add(2002, 1, 1)
t.add(2002, 1, 2)
t.add(2002, 1, 3)
from datetime import date
compare(date.today(), d(2002, 1, 1))
compare(date.today(), d(2002, 1, 2))
compare(date.today(), d(2002, 1, 3))
@replace('datetime.date', test_date(None))
def test_today_requested_longer_than_supplied(self, t):
t.add(2002, 1, 1)
t.add(2002, 1, 2)
from datetime import date
compare(date.today(), d(2002, 1, 1))
compare(date.today(), d(2002, 1, 2))
compare(date.today(), d(2002, 1, 3))
compare(date.today(), d(2002, 1, 5))
@replace('datetime.date', test_date(None))
def test_add_date_supplied(self):
from datetime import date
date.add(d(2001, 1, 2))
date.add(date(2001, 1, 3))
compare(date.today(), d(2001, 1, 2))
compare(date.today(), d(2001, 1, 3))
def test_instantiate_with_date(self):
from datetime import date
t = test_date(date(2002, 1, 1))
compare(t.today(), d(2002, 1, 1))
@replace('datetime.date', test_date(strict=True))
def test_call(self, t):
compare(t(2002, 1, 2), d(2002, 1, 2))
from datetime import date
dt = date(2003, 2, 1)
self.failIf(dt.__class__ is d)
compare(dt, d(2003, 2, 1))
def test_gotcha_import(self):
# standard `replace` caveat, make sure you
# patch all relevant places where date
# has been imported:
@replace('datetime.date', test_date())
def test_something():
from datetime import date
compare(date.today(), d(2001, 1, 1))
compare(sample1.str_today_1(), '2001-01-02')
with ShouldRaise(AssertionError) as s:
test_something()
# This convoluted check is because we can't stub
# out the date, since we're testing stubbing out
# the date ;-)
j, dt1, j, dt2, j = s.raised.args[0].split("'")
# check we can parse the date
dt1 = strptime(dt1, '%Y-%m-%d')
# check the dt2 bit was as it should be
compare(dt2, '2001-01-02')
# What you need to do is replace the imported type:
@replace('testfixtures.tests.sample1.date', test_date())
def test_something():
compare(sample1.str_today_1(), '2001-01-01')
test_something()
def test_gotcha_import_and_obtain(self):
# Another gotcha is where people have locally obtained
# a class attributes, where the normal patching doesn't
# work:
@replace('testfixtures.tests.sample1.date', test_date())
def test_something():
compare(sample1.str_today_2(), '2001-01-01')
with ShouldRaise(AssertionError) as s:
test_something()
# This convoluted check is because we can't stub
# out the date, since we're testing stubbing out
# the date ;-)
j, dt1, j, dt2, j = s.raised.args[0].split("'")
# check we can parse the date
dt1 = strptime(dt1, '%Y-%m-%d')
# check the dt2 bit was as it should be
compare(dt2, '2001-01-01')
# What you need to do is replace the imported name:
@replace('testfixtures.tests.sample1.today', test_date().today)
def test_something():
compare(sample1.str_today_2(), '2001-01-01')
test_something()
# if you have an embedded `today` as above, *and* you need to supply
# a list of required dates, then it's often simplest just to
# do a manual try-finally with a replacer:
def test_import_and_obtain_with_lists(self):
t = test_date(None)
t.add(2002, 1, 1)
t.add(2002, 1, 2)
from testfixtures import Replacer
r = Replacer()
r.replace('testfixtures.tests.sample1.today', t.today)
try:
compare(sample1.str_today_2(), '2002-01-01')
compare(sample1.str_today_2(), '2002-01-02')
finally:
r.restore()
@replace('datetime.date', test_date())
def test_repr(self):
from datetime import date
compare(repr(date), "<class 'testfixtures.tdatetime.tdate'>")
@replace('datetime.date', test_date(delta=2))
def test_delta(self):
from datetime import date
compare(date.today(), d(2001, 1, 1))
compare(date.today(), d(2001, 1, 3))
compare(date.today(), d(2001, 1, 5))
@replace('datetime.date', test_date(delta_type='weeks'))
def test_delta_type(self):
from datetime import date
compare(date.today(), d(2001, 1, 1))
compare(date.today(), d(2001, 1, 8))
compare(date.today(), d(2001, 1, 22))
@replace('datetime.date', test_date(None))
def test_set(self):
from datetime import date
date.set(2001, 1, 2)
compare(date.today(), d(2001, 1, 2))
date.set(2002, 1, 1)
compare(date.today(), d(2002, 1, 1))
compare(date.today(), d(2002, 1, 3))
@replace('datetime.date', test_date(None))
def test_set_date_supplied(self):
from datetime import date
date.set(d(2001, 1, 2))
compare(date.today(), d(2001, 1, 2))
date.set(date(2001, 1, 3))
compare(date.today(), d(2001, 1, 3))
@replace('datetime.date', test_date(None))
def test_set_kw(self):
from datetime import date
date.set(year=2001, month=1, day=2)
compare(date.today(), d(2001, 1, 2))
@replace('datetime.date', test_date(None))
def test_add_kw(self, t):
t.add(year=2002, month=1, day=1)
from datetime import date
compare(date.today(), d(2002, 1, 1))
@replace('datetime.date', test_date(strict=True))
def test_isinstance_strict_true(self):
from datetime import date
to_check = []
to_check.append(date(1999, 1, 1))
to_check.append(date.today())
date.set(2001, 1, 2)
to_check.append(date.today())
date.add(2001, 1, 3)
to_check.append(date.today())
to_check.append(date.today())
date.set(date(2001, 1, 4))
to_check.append(date.today())
date.add(date(2001, 1, 5))
to_check.append(date.today())
to_check.append(date.today())
date.set(d(2001, 1, 4))
to_check.append(date.today())
date.add(d(2001, 1, 5))
to_check.append(date.today())
to_check.append(date.today())
for inst in to_check:
self.failUnless(isinstance(inst, date), inst)
self.failUnless(inst.__class__ is date, inst)
self.failUnless(isinstance(inst, d), inst)
self.failIf(inst.__class__ is d, inst)
@replace('datetime.date', test_date())
def test_isinstance_default(self):
from datetime import date
to_check = []
to_check.append(date(1999, 1, 1))
to_check.append(date.today())
date.set(2001, 1, 2)
to_check.append(date.today())
date.add(2001, 1, 3)
to_check.append(date.today())
to_check.append(date.today())
date.set(date(2001, 1, 4))
to_check.append(date.today())
date.add(date(2001, 1, 5))
to_check.append(date.today())
to_check.append(date.today())
date.set(d(2001, 1, 4))
to_check.append(date.today())
date.add(d(2001, 1, 5))
to_check.append(date.today())
to_check.append(date.today())
for inst in to_check:
self.failIf(isinstance(inst, date), inst)
self.failIf(inst.__class__ is date, inst)
self.failUnless(isinstance(inst, d), inst)
self.failUnless(inst.__class__ is d, inst)
def test_tick_when_static(self):
date = test_date(delta=0)
compare(date.today(), expected=d(2001, 1, 1))
date.tick(days=1)
compare(date.today(), expected=d(2001, 1, 2))
def test_tick_when_dynamic(self):
# hopefully not that common?
date = test_date()
compare(date.today(), expected=date(2001, 1, 1))
date.tick(days=1)
compare(date.today(), expected=date(2001, 1, 3))
def test_tick_with_timedelta_instance(self):
date = test_date(delta=0)
compare(date.today(), expected=d(2001, 1, 1))
date.tick(timedelta(days=1))
compare(date.today(), expected=d(2001, 1, 2))
|
py | b40682f761f6eb1db7904ef86c2757986e4ee177 | """
Contour segmentation
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: Ramon Casero <[email protected]>
"""
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
# PyCharm automatically adds cytometer to the python path, but this doesn't happen if the script is run
# with "python scriptname.py"
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
import pickle
import inspect
# other imports
import glob
import shutil
import datetime
import numpy as np
import matplotlib.pyplot as plt
# use CPU for testing on laptop
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, AvgPool2D, Activation
# for data parallelism in keras models
from keras.utils import multi_gpu_model
import cytometer.data
import cytometer.model_checkpoint_parallel
import random
import tensorflow as tf
# # limit GPU memory used
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 1.0
# set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
# number of blocks to split each image into so that training fits into GPU memory
nblocks = 2
# number of folds for k-fold cross validation
n_folds = 11
# number of epochs for training
epochs = 20
'''Directories and filenames
'''
# data paths
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
training_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training')
training_non_overlap_data_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_non_overlap')
training_augmented_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_augmented')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
# script name to identify this experiment
experiment_id = inspect.getfile(inspect.currentframe())
if experiment_id == '<input>':
experiment_id = 'unknownscript'
else:
experiment_id = os.path.splitext(os.path.basename(experiment_id))[0]
'''CNN Model
'''
def fcn_sherrah2016_classifier(input_shape, for_receptive_field=False):
input = Input(shape=input_shape, dtype='float32', name='input_image')
x = Conv2D(filters=32, kernel_size=(5, 5), strides=1, dilation_rate=1, padding='same')(input)
if for_receptive_field:
x = Activation('linear')(x)
x = AvgPool2D(pool_size=(3, 3), strides=1, padding='same')(x)
else:
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=1, padding='same')(x)
x = Conv2D(filters=int(96/2), kernel_size=(5, 5), strides=1, dilation_rate=2, padding='same')(x)
if for_receptive_field:
x = Activation('linear')(x)
x = AvgPool2D(pool_size=(5, 5), strides=1, padding='same')(x)
else:
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(5, 5), strides=1, padding='same')(x)
x = Conv2D(filters=int(128/2), kernel_size=(3, 3), strides=1, dilation_rate=4, padding='same')(x)
if for_receptive_field:
x = Activation('linear')(x)
x = AvgPool2D(pool_size=(9, 9), strides=1, padding='same')(x)
else:
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(9, 9), strides=1, padding='same')(x)
x = Conv2D(filters=int(196/2), kernel_size=(3, 3), strides=1, dilation_rate=8, padding='same')(x)
if for_receptive_field:
x = Activation('linear')(x)
x = AvgPool2D(pool_size=(17, 17), strides=1, padding='same')(x)
else:
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(17, 17), strides=1, padding='same')(x)
x = Conv2D(filters=int(512/2), kernel_size=(3, 3), strides=1, dilation_rate=16, padding='same')(x)
if for_receptive_field:
x = Activation('linear')(x)
else:
x = Activation('relu')(x)
# dimensionality reduction
x = Conv2D(filters=1, kernel_size=(1, 1), strides=1, dilation_rate=1, padding='same')(x)
# classification output
classification_output = Activation('hard_sigmoid', name='classification_output')(x)
return Model(inputs=input, outputs=[classification_output])
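# Hedged usage sketch (an assumption for illustration, not part of the original experiment):
# the same architecture can be built twice, once with ReLU/max-pooling for training and once
# with linear activations/average pooling to estimate the effective receptive field. The
# input shape below is made up.
if DEBUG:
    demo_model = fcn_sherrah2016_classifier(input_shape=(401, 401, 3))
    demo_receptive_field_model = fcn_sherrah2016_classifier(input_shape=(401, 401, 3),
                                                            for_receptive_field=True)
    demo_model.summary()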
'''Prepare folds
'''
# list of original training images, pre-augmentation
im_orig_file_list = glob.glob(os.path.join(training_augmented_dir, 'im_*_nan_*.tif'))
# number of original training images
n_orig_im = len(im_orig_file_list)
# create k-fold sets to split the data into training vs. testing
kfold_seed = 0
random.seed(kfold_seed)
idx = random.sample(range(n_orig_im), n_orig_im)
idx_test_all = np.array_split(idx, n_folds)
# save the k-fold description for future reference
saved_model_datainfo_filename = os.path.join(saved_models_dir, experiment_id + '_info.pickle')
with open(saved_model_datainfo_filename, 'wb') as f:
x = {'file_list': im_orig_file_list, 'idx_test_all': idx_test_all, 'kfold_seed': kfold_seed}
pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
# loop each fold: we split the data into train vs test, train a model, and compute errors with the
# test data. In each fold, the test data is different
# for i_fold, idx_test in enumerate(idx_test_all):
for i_fold, idx_test in enumerate([idx_test_all[0]]):
'''Load data
'''
# split the data into training and testing datasets
im_test_file_list, im_train_file_list = cytometer.data.split_list(im_orig_file_list, idx_test)
# add the augmented image files
im_train_file_list = cytometer.data.augment_file_list(im_train_file_list, '_nan_', '_*_')
im_test_file_list = cytometer.data.augment_file_list(im_test_file_list, '_nan_', '_*_')
# load the train and test data: im, seg, dmap and mask data
train_dataset, train_file_list, train_shuffle_idx = \
cytometer.data.load_datasets(im_train_file_list, prefix_from='im', prefix_to=['im', 'seg', 'mask'],
nblocks=nblocks, shuffle_seed=i_fold)
test_dataset, test_file_list, test_shuffle_idx = \
cytometer.data.load_datasets(im_test_file_list, prefix_from='im', prefix_to=['im', 'seg', 'mask'],
nblocks=nblocks, shuffle_seed=i_fold)
# remove training data where the mask has very few valid pixels
train_dataset = cytometer.data.remove_poor_data(train_dataset, prefix='mask', threshold=1000)
test_dataset = cytometer.data.remove_poor_data(test_dataset, prefix='mask', threshold=1000)
if DEBUG:
i = 150
plt.clf()
for pi, prefix in enumerate(train_dataset.keys()):
plt.subplot(1, len(train_dataset.keys()), pi + 1)
if train_dataset[prefix].shape[-1] < 3:
plt.imshow(train_dataset[prefix][i, :, :, 0])
else:
plt.imshow(train_dataset[prefix][i, :, :, :])
plt.title('out[' + prefix + ']')
i = 22
plt.clf()
for pi, prefix in enumerate(test_dataset.keys()):
plt.subplot(1, len(test_dataset.keys()), pi + 1)
if test_dataset[prefix].shape[-1] < 3:
plt.imshow(test_dataset[prefix][i, :, :, 0])
else:
plt.imshow(test_dataset[prefix][i, :, :, :])
plt.title('out[' + prefix + ']')
'''Convolutional neural network training
    Note: you need to use my branch of Keras with the new functionality that allows
    element-wise weighting of the loss function
'''
# list all CPUs and GPUs
device_list = K.get_session().list_devices()
# number of GPUs
gpu_number = np.count_nonzero(['GPU' in str(x) for x in device_list])
# instantiate model
with tf.device('/cpu:0'):
model = fcn_sherrah2016_classifier(input_shape=train_dataset['im'].shape[1:])
saved_model_filename = os.path.join(saved_models_dir, experiment_id + '_model_fold_' + str(i_fold) + '.h5')
if gpu_number > 1: # compile and train model: Multiple GPUs
# checkpoint to save model after each epoch
checkpointer = cytometer.model_checkpoint_parallel.ModelCheckpoint(filepath=saved_model_filename,
verbose=1, save_best_only=True)
# compile model
parallel_model = multi_gpu_model(model, gpus=gpu_number)
parallel_model.compile(loss={'classification_output': 'binary_crossentropy'},
optimizer='Adadelta',
metrics={'classification_output': 'accuracy'},
sample_weight_mode='element')
# train model
tic = datetime.datetime.now()
parallel_model.fit(train_dataset['im'],
{'classification_output': train_dataset['seg']},
sample_weight={'classification_output': train_dataset['mask'][..., 0]},
validation_data=(test_dataset['im'],
{'classification_output': test_dataset['seg']},
{'classification_output': test_dataset['mask'][..., 0]}),
batch_size=10, epochs=epochs, initial_epoch=0,
callbacks=[checkpointer])
toc = datetime.datetime.now()
print('Training duration: ' + str(toc - tic))
else: # compile and train model: One GPU
# checkpoint to save model after each epoch
checkpointer = keras.callbacks.ModelCheckpoint(filepath=saved_model_filename,
verbose=1, save_best_only=True)
# compile model
model.compile(loss={'classification_output': 'binary_crossentropy'},
optimizer='Adadelta',
metrics={'classification_output': 'accuracy'},
sample_weight_mode='element')
# train model
tic = datetime.datetime.now()
model.fit(train_dataset['im'],
{'classification_output': train_dataset['seg']},
sample_weight={'classification_output': train_dataset['mask'][..., 0]},
validation_data=(test_dataset['im'],
{'classification_output': test_dataset['seg']},
{'classification_output': test_dataset['mask'][..., 0]}),
batch_size=10, epochs=epochs, initial_epoch=0,
callbacks=[checkpointer])
toc = datetime.datetime.now()
print('Training duration: ' + str(toc - tic))
# if we run the script with qsub on the cluster, the standard output is in file
# klf14_b6ntac_exp_0001_cnn_dmap_contour.sge.sh.oPID where PID is the process ID
# Save it to saved_models directory
log_filename = os.path.join(saved_models_dir, experiment_id + '.log')
stdout_filename = os.path.join(home, 'Software', 'cytometer', 'scripts', experiment_id + '.sge.sh.o*')
stdout_filename = (glob.glob(stdout_filename) or [None])[0]
if stdout_filename and os.path.isfile(stdout_filename):
shutil.copy2(stdout_filename, log_filename)
else:
# if we ran the script with nohup in linux, the standard output is in file nohup.out.
# Save it to saved_models directory
log_filename = os.path.join(saved_models_dir, experiment_id + '.log')
nohup_filename = os.path.join(home, 'Software', 'cytometer', 'scripts', 'nohup.out')
if os.path.isfile(nohup_filename):
shutil.copy2(nohup_filename, log_filename)
|
py | b406843ee5b73c4ff70e44e48a6a2762785dd492 | #!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: http://kazuto1011.github.io
# Created: 2017-11-19
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
class _ConvBatchNormReLU(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, relu=True):
super(_ConvBatchNormReLU, self).__init__()
self.add_module(
'conv',
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=False,
),
)
self.add_module(
'bn',
nn.BatchNorm2d(
num_features=out_channels,
eps=1e-5,
momentum=0.999,
affine=True,
),
)
if relu:
self.add_module('relu', nn.ReLU())
def forward(self, x):
return super(_ConvBatchNormReLU, self).forward(x)
class _Bottleneck(nn.Sequential):
"""Bottleneck Unit"""
def __init__(self, in_channels, mid_channels, out_channels, stride, dilation, downsample):
super(_Bottleneck, self).__init__()
self.reduce = _ConvBatchNormReLU(in_channels, mid_channels, 1, stride, 0, 1)
self.conv3x3 = _ConvBatchNormReLU(mid_channels, mid_channels, 3, 1, dilation, dilation)
self.increase = _ConvBatchNormReLU(mid_channels, out_channels, 1, 1, 0, 1, relu=False)
self.downsample = downsample
if self.downsample:
self.proj = _ConvBatchNormReLU(in_channels, out_channels, 1, stride, 0, 1, relu=False)
def forward(self, x):
h = self.reduce(x)
h = self.conv3x3(h)
h = self.increase(h)
if self.downsample:
h += self.proj(x)
else:
h += x
return F.relu(h)
class _ResBlock(nn.Sequential):
"""Residual Block"""
def __init__(self, n_layers, in_channels, mid_channels, out_channels, stride, dilation):
super(_ResBlock, self).__init__()
self.add_module('block1', _Bottleneck(in_channels, mid_channels, out_channels, stride, dilation, True))
for i in range(2, n_layers + 1):
self.add_module('block' + str(i), _Bottleneck(out_channels, mid_channels, out_channels, 1, dilation, False))
def __call__(self, x):
return super(_ResBlock, self).forward(x)
class _ResBlockMG(nn.Sequential):
"""3x Residual Block with multi-grid"""
def __init__(self, n_layers, in_channels, mid_channels, out_channels, stride, dilation, mg=[1, 2, 1]):
super(_ResBlockMG, self).__init__()
self.add_module('block1', _Bottleneck(in_channels, mid_channels, out_channels, stride, dilation * mg[0], True))
self.add_module('block2', _Bottleneck(out_channels, mid_channels, out_channels, 1, dilation * mg[1], False))
self.add_module('block3', _Bottleneck(out_channels, mid_channels, out_channels, 1, dilation * mg[2], False))
def __call__(self, x):
return super(_ResBlockMG, self).forward(x)
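# Hedged usage sketch (illustrative; the channel sizes follow the usual ResNet layout and
# are not taken from any specific configuration in this file):
if __name__ == '__main__':
    block = _ResBlock(n_layers=3, in_channels=64, mid_channels=64,
                      out_channels=256, stride=1, dilation=1)
    x = torch.randn(1, 64, 56, 56)
    print(block(x).shape)  # expected: torch.Size([1, 256, 56, 56])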
|
py | b406848635fb87b7adf4aeb40dc3d4dc148a64cd | OO_MODULE_LEN = 4
|
py | b406848b5fbf1e52c431982d301299fb752dd1f2 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import glob
import itertools
import os.path
import re
import weakref
from oslo_config import cfg
from oslo_log import log
from oslo_utils import fnmatch
import six
from heat.common import environment_format as env_fmt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import policy
from heat.engine import support
LOG = log.getLogger(__name__)
HOOK_TYPES = (
HOOK_PRE_CREATE, HOOK_PRE_UPDATE, HOOK_PRE_DELETE, HOOK_POST_CREATE,
HOOK_POST_UPDATE, HOOK_POST_DELETE
) = (
'pre-create', 'pre-update', 'pre-delete', 'post-create',
'post-update', 'post-delete'
)
RESTRICTED_ACTIONS = (UPDATE, REPLACE) = ('update', 'replace')
def valid_hook_type(hook):
return hook in HOOK_TYPES
def valid_restricted_actions(action):
return action in RESTRICTED_ACTIONS
def is_hook_definition(key, value):
is_valid_hook = False
if key == 'hooks':
if isinstance(value, six.string_types):
is_valid_hook = valid_hook_type(value)
elif isinstance(value, collections.Sequence):
is_valid_hook = all(valid_hook_type(hook) for hook in value)
if not is_valid_hook:
msg = (_('Invalid hook type "%(value)s" for resource '
'breakpoint, acceptable hook types are: %(types)s') %
{'value': value, 'types': HOOK_TYPES})
raise exception.InvalidBreakPointHook(message=msg)
return is_valid_hook
def is_valid_restricted_action(key, value):
valid_action = False
if key == 'restricted_actions':
if isinstance(value, six.string_types):
valid_action = valid_restricted_actions(value)
elif isinstance(value, collections.Sequence):
valid_action = all(valid_restricted_actions(
action) for action in value)
if not valid_action:
msg = (_('Invalid restricted_action type "%(value)s" for '
'resource, acceptable restricted_action '
'types are: %(types)s') %
{'value': value, 'types': RESTRICTED_ACTIONS})
raise exception.InvalidRestrictedAction(message=msg)
return valid_action
class ResourceInfo(object):
"""Base mapping of resource type to implementation."""
def __new__(cls, registry, path, value, **kwargs):
"""Create a new ResourceInfo of the appropriate class."""
if cls != ResourceInfo:
# Call is already for a subclass, so pass it through
return super(ResourceInfo, cls).__new__(cls)
name = path[-1]
if name.endswith(('.yaml', '.template')):
# a template url for the resource "Type"
return TemplateResourceInfo(registry, path, value)
elif not isinstance(value, six.string_types):
return ClassResourceInfo(registry, path, value)
elif value.endswith(('.yaml', '.template')):
# a registered template
return TemplateResourceInfo(registry, path, value)
elif name.endswith('*'):
return GlobResourceInfo(registry, path, value)
else:
return MapResourceInfo(registry, path, value)
def __init__(self, registry, path, value):
self._registry = weakref.ref(registry)
self.path = path
self.name = path[-1]
self.value = value
self.user_resource = True
@property
def registry(self):
return self._registry()
def __eq__(self, other):
if other is None:
return False
return (self.path == other.path and
self.value == other.value and
self.user_resource == other.user_resource)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if self.user_resource != other.user_resource:
# user resource must be sorted above system ones.
return self.user_resource > other.user_resource
if len(self.path) != len(other.path):
# more specific (longer) path must be sorted above system ones.
return len(self.path) > len(other.path)
return self.path < other.path
def __gt__(self, other):
return other.__lt__(self)
def get_resource_info(self, resource_type=None, resource_name=None):
return self
def matches(self, resource_type):
return False
def get_class(self):
        raise NotImplementedError
def get_class_to_instantiate(self):
return self.get_class()
def __str__(self):
return '[%s](User:%s) %s -> %s' % (self.description,
self.user_resource,
self.name, str(self.value))
class ClassResourceInfo(ResourceInfo):
"""Store the mapping of resource name to python class implementation."""
description = 'Plugin'
def get_class(self, files=None):
return self.value
class TemplateResourceInfo(ResourceInfo):
"""Store the info needed to start a TemplateResource."""
description = 'Template'
def __init__(self, registry, path, value):
super(TemplateResourceInfo, self).__init__(registry, path, value)
if self.name.endswith(('.yaml', '.template')):
self.template_name = self.name
else:
self.template_name = value
self.value = self.template_name
def get_class(self, files=None):
from heat.engine.resources import template_resource
if files and self.template_name in files:
data = files[self.template_name]
else:
if self.user_resource:
allowed_schemes = template_resource.REMOTE_SCHEMES
else:
allowed_schemes = template_resource.LOCAL_SCHEMES
data = template_resource.TemplateResource.get_template_file(
self.template_name,
allowed_schemes)
param_defaults = self.registry.param_defaults
return template_resource.generate_class_from_template(str(self.name),
data,
param_defaults)
def get_class_to_instantiate(self):
from heat.engine.resources import template_resource
return template_resource.TemplateResource
class MapResourceInfo(ResourceInfo):
"""Store the mapping of one resource type to another.
like: OS::Networking::FloatingIp -> OS::Neutron::FloatingIp
"""
description = 'Mapping'
def get_class(self, files=None):
return None
def get_resource_info(self, resource_type=None, resource_name=None):
return self.registry.get_resource_info(self.value, resource_name)
class GlobResourceInfo(MapResourceInfo):
"""Store the mapping (with wild cards) of one resource type to another.
like: OS::Networking::* -> OS::Neutron::*
Also supports many-to-one mapping (mostly useful together with special
"OS::Heat::None" resource)
like: OS::* -> OS::Heat::None
"""
description = 'Wildcard Mapping'
def get_resource_info(self, resource_type=None, resource_name=None):
# NOTE(pas-ha) we end up here only when self.name already
# ends with * so truncate it
orig_prefix = self.name[:-1]
if self.value.endswith('*'):
new_type = self.value[:-1] + resource_type[len(orig_prefix):]
else:
new_type = self.value
return self.registry.get_resource_info(new_type, resource_name)
def matches(self, resource_type):
# prevent self-recursion in case of many-to-one mapping
match = (resource_type != self.value and
resource_type.startswith(self.name[:-1]))
return match
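# Hedged dispatch sketch (illustrative only, not part of the upstream Heat source): given
# any registry object that supports weak references, ResourceInfo.__new__ selects the
# subclass from the path and value. The resource names below are made up.
def _example_resource_info_dispatch(registry):
    template_info = ResourceInfo(registry, ['OS::Custom::Server'], 'server.yaml')
    glob_info = ResourceInfo(registry, ['OS::Networking::*'], 'OS::Neutron::*')
    mapping_info = ResourceInfo(registry, ['OS::Networking::FloatingIp'],
                                'OS::Neutron::FloatingIp')
    return (isinstance(template_info, TemplateResourceInfo),
            isinstance(glob_info, GlobResourceInfo),
            isinstance(mapping_info, MapResourceInfo))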
class ResourceRegistry(object):
"""By looking at the environment, find the resource implementation."""
def __init__(self, global_registry, param_defaults):
self._registry = {'resources': {}}
self.global_registry = global_registry
self.param_defaults = param_defaults
def load(self, json_snippet):
self._load_registry([], json_snippet)
def register_class(self, resource_type, resource_class, path=None):
if path is None:
path = [resource_type]
ri = ResourceInfo(self, path, resource_class)
self._register_info(path, ri)
def _load_registry(self, path, registry):
for k, v in iter(registry.items()):
if v is None:
self._register_info(path + [k], None)
elif is_hook_definition(k, v) or is_valid_restricted_action(k, v):
self._register_item(path + [k], v)
elif isinstance(v, dict):
self._load_registry(path + [k], v)
else:
self._register_info(path + [k],
ResourceInfo(self, path + [k], v))
def _register_item(self, path, item):
name = path[-1]
registry = self._registry
for key in path[:-1]:
if key not in registry:
registry[key] = {}
registry = registry[key]
registry[name] = item
def _register_info(self, path, info):
"""Place the new info in the correct location in the registry.
:param path: a list of keys ['resources', 'my_srv', 'OS::Nova::Server']
"""
descriptive_path = '/'.join(path)
name = path[-1]
# create the structure if needed
registry = self._registry
for key in path[:-1]:
if key not in registry:
registry[key] = {}
registry = registry[key]
if info is None:
if name.endswith('*'):
# delete all matching entries.
for res_name in list(six.iterkeys(registry)):
if (isinstance(registry[res_name], ResourceInfo) and
res_name.startswith(name[:-1])):
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
'item': res_name,
'path': descriptive_path})
del registry[res_name]
else:
# delete this entry.
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
'item': name,
'path': descriptive_path})
registry.pop(name, None)
return
if name in registry and isinstance(registry[name], ResourceInfo):
if registry[name] == info:
return
details = {
'path': descriptive_path,
'was': str(registry[name].value),
'now': str(info.value)}
LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'),
details)
if isinstance(info, ClassResourceInfo):
if info.value.support_status.status != support.SUPPORTED:
if info.value.support_status.message is not None:
details = {
'name': info.name,
'status': six.text_type(
info.value.support_status.status),
'message': six.text_type(
info.value.support_status.message)
}
LOG.warning(_LW('%(name)s is %(status)s. %(message)s'),
details)
info.user_resource = (self.global_registry is not None)
registry[name] = info
def log_resource_info(self, show_all=False, prefix=None):
registry = self._registry
prefix = '%s ' % prefix if prefix is not None else ''
for name in registry:
if name == 'resources':
continue
if show_all or isinstance(registry[name], TemplateResourceInfo):
msg = (_LI('%(p)sRegistered: %(t)s') %
{'p': prefix,
't': six.text_type(registry[name])})
LOG.info(msg)
def remove_item(self, info):
if not isinstance(info, TemplateResourceInfo):
return
registry = self._registry
for key in info.path[:-1]:
registry = registry[key]
if info.path[-1] in registry:
registry.pop(info.path[-1])
def get_rsrc_restricted_actions(self, resource_name):
"""Returns a set of restricted actions.
For a given resource we get the set of restricted actions.
Actions are set in this format via `resources`:
{
"restricted_actions": [update, replace]
}
A restricted_actions value is either `update`, `replace` or a list
of those values. Resources support wildcard matching. The asterisk
sign matches everything.
"""
ress = self._registry['resources']
restricted_actions = set()
for name_pattern, resource in six.iteritems(ress):
if fnmatch.fnmatchcase(resource_name, name_pattern):
if 'restricted_actions' in resource:
actions = resource['restricted_actions']
if isinstance(actions, six.string_types):
restricted_actions.add(actions)
elif isinstance(actions, collections.Sequence):
restricted_actions |= set(actions)
return restricted_actions
def matches_hook(self, resource_name, hook):
"""Return whether a resource have a hook set in the environment.
For a given resource and a hook type, we check to see if the passed
group of resources has the right hook associated with the name.
Hooks are set in this format via `resources`:
{
"res_name": {
"hooks": [pre-create, pre-update]
},
"*_suffix": {
"hooks": pre-create
},
"prefix_*": {
"hooks": pre-update
}
}
A hook value is either `pre-create`, `pre-update` or a list of those
values. Resources support wildcard matching. The asterisk sign matches
everything.
"""
ress = self._registry['resources']
for name_pattern, resource in six.iteritems(ress):
if fnmatch.fnmatchcase(resource_name, name_pattern):
if 'hooks' in resource:
hooks = resource['hooks']
if isinstance(hooks, six.string_types):
if hook == hooks:
return True
elif isinstance(hooks, collections.Sequence):
if hook in hooks:
return True
return False
def remove_resources_except(self, resource_name):
ress = self._registry['resources']
new_resources = {}
for name, res in six.iteritems(ress):
if fnmatch.fnmatchcase(resource_name, name):
new_resources.update(res)
if resource_name in ress:
new_resources.update(ress[resource_name])
self._registry['resources'] = new_resources
def iterable_by(self, resource_type, resource_name=None):
is_templ_type = resource_type.endswith(('.yaml', '.template'))
if self.global_registry is not None and is_templ_type:
# we only support dynamic resource types in user environments
# not the global environment.
# resource with a Type == a template
# we dynamically create an entry as it has not been registered.
if resource_type not in self._registry:
res = ResourceInfo(self, [resource_type], None)
self._register_info([resource_type], res)
yield self._registry[resource_type]
# handle a specific resource mapping.
if resource_name:
impl = self._registry['resources'].get(resource_name)
if impl and resource_type in impl:
yield impl[resource_type]
# handle: "OS::Nova::Server" -> "Rackspace::Cloud::Server"
impl = self._registry.get(resource_type)
if impl:
yield impl
# handle: "OS::*" -> "Dreamhost::*"
def is_a_glob(resource_type):
return resource_type.endswith('*')
globs = six.moves.filter(is_a_glob, six.iterkeys(self._registry))
for pattern in globs:
if self._registry[pattern].matches(resource_type):
yield self._registry[pattern]
def get_resource_info(self, resource_type, resource_name=None,
registry_type=None, ignore=None):
"""Find possible matches to the resource type and name.
Chain the results from the global and user registry to find
a match.
"""
# use cases
# 1) get the impl.
# - filter_by(res_type=X), sort_by(res_name=W, is_user=True)
# 2) in TemplateResource we need to get both the
# TemplateClass and the ResourceClass
# - filter_by(res_type=X, impl_type=TemplateResourceInfo),
# sort_by(res_name=W, is_user=True)
# - filter_by(res_type=X, impl_type=ClassResourceInfo),
# sort_by(res_name=W, is_user=True)
# 3) get_types() from the api
# - filter_by(is_user=False)
# 4) as_dict() to write to the db
# - filter_by(is_user=True)
if self.global_registry is not None:
giter = self.global_registry.iterable_by(resource_type,
resource_name)
else:
giter = []
matches = itertools.chain(self.iterable_by(resource_type,
resource_name),
giter)
for info in sorted(matches):
try:
match = info.get_resource_info(resource_type,
resource_name)
except exception.EntityNotFound:
continue
if registry_type is None or isinstance(match, registry_type):
if ignore is not None and match == ignore:
continue
# NOTE(prazumovsky): if resource_type defined in outer env
# there is a risk to lose it due to h-eng restarting, so
# store it to local env (exclude ClassResourceInfo because it
# loads from resources; TemplateResourceInfo handles by
# template_resource module).
if (match and not match.user_resource and
not isinstance(info, (TemplateResourceInfo,
ClassResourceInfo))):
self._register_info([resource_type], info)
return match
raise exception.EntityNotFound(entity='Resource Type',
name=resource_type)
def get_class(self, resource_type, resource_name=None, files=None):
info = self.get_resource_info(resource_type,
resource_name=resource_name)
return info.get_class(files=files)
def get_class_to_instantiate(self, resource_type, resource_name=None):
if resource_type == "":
msg = _('Resource "%s" has no type') % resource_name
raise exception.StackValidationFailed(message=msg)
elif resource_type is None:
msg = _('Non-empty resource type is required '
'for resource "%s"') % resource_name
raise exception.StackValidationFailed(message=msg)
elif not isinstance(resource_type, six.string_types):
msg = _('Resource "%s" type is not a string') % resource_name
raise exception.StackValidationFailed(message=msg)
try:
info = self.get_resource_info(resource_type,
resource_name=resource_name)
except exception.EntityNotFound as exc:
raise exception.StackValidationFailed(message=six.text_type(exc))
return info.get_class_to_instantiate()
def as_dict(self):
"""Return user resources in a dict format."""
def _as_dict(level):
tmp = {}
for k, v in iter(level.items()):
if isinstance(v, dict):
tmp[k] = _as_dict(v)
elif is_hook_definition(
k, v) or is_valid_restricted_action(k, v):
tmp[k] = v
elif v.user_resource:
tmp[k] = v.value
return tmp
return _as_dict(self._registry)
def get_types(self,
cnxt=None,
support_status=None,
type_name=None,
version=None,
with_description=False):
"""Return a list of valid resource types."""
# validate the support status
if support_status is not None and not support.is_valid_status(
support_status):
msg = (_('Invalid support status and should be one of %s') %
six.text_type(support.SUPPORT_STATUSES))
raise exception.Invalid(reason=msg)
def is_resource(key):
return isinstance(self._registry[key], (ClassResourceInfo,
TemplateResourceInfo))
def status_matches(cls):
return (support_status is None or
cls.get_class().support_status.status ==
support_status)
def is_available(cls):
if cnxt is None:
return True
try:
return cls.get_class().is_service_available(cnxt)
except Exception:
return False
def not_hidden_matches(cls):
return cls.get_class().support_status.status != support.HIDDEN
def is_allowed(enforcer, name):
if cnxt is None:
return True
try:
enforcer.enforce(cnxt, name)
except enforcer.exc:
return False
else:
return True
enforcer = policy.ResourceEnforcer()
def name_matches(name):
try:
return type_name is None or re.match(type_name, name)
except: # noqa
return False
def version_matches(cls):
return (version is None or
cls.get_class().support_status.version == version)
def resource_description(name, cls, with_description):
if not with_description:
return name
if cls.description == 'Plugin':
rsrc = cls.value
elif cls.description == 'Template':
rsrc = cls.get_class()
else:
rsrc = None
return {
'resource_type': name,
'description': rsrc.__doc__}
return [resource_description(name, cls, with_description)
for name, cls in six.iteritems(self._registry)
if (is_resource(name) and
name_matches(name) and
status_matches(cls) and
is_available(cls) and
is_allowed(enforcer, name) and
not_hidden_matches(cls) and
version_matches(cls))]
class Environment(object):
def __init__(self, env=None, user_env=True):
"""Create an Environment from an input dict.
The dict may be in one of two formats:
1) old-school flat parameters; or
2) newer {resource_registry: bla, parameters: foo}
:param env: the json environment
:param user_env: boolean, if False then we manage python resources too.
"""
if env is None:
env = {}
if user_env:
from heat.engine import resources
global_env = resources.global_env()
global_registry = global_env.registry
event_sink_classes = global_env.event_sink_classes
else:
global_registry = None
event_sink_classes = {}
self.param_defaults = env.get(env_fmt.PARAMETER_DEFAULTS, {})
self.registry = ResourceRegistry(global_registry, self.param_defaults)
self.registry.load(env.get(env_fmt.RESOURCE_REGISTRY, {}))
self.encrypted_param_names = env.get(env_fmt.ENCRYPTED_PARAM_NAMES, [])
if env_fmt.PARAMETERS in env:
self.params = env[env_fmt.PARAMETERS]
else:
self.params = dict((k, v) for (k, v) in six.iteritems(env)
if k not in (env_fmt.PARAMETER_DEFAULTS,
env_fmt.ENCRYPTED_PARAM_NAMES,
env_fmt.EVENT_SINKS,
env_fmt.RESOURCE_REGISTRY))
self.event_sink_classes = event_sink_classes
self._event_sinks = []
self._built_event_sinks = []
self._update_event_sinks(env.get(env_fmt.EVENT_SINKS, []))
self.constraints = {}
self.stack_lifecycle_plugins = []
def load(self, env_snippet):
self.registry.load(env_snippet.get(env_fmt.RESOURCE_REGISTRY, {}))
self.params.update(env_snippet.get(env_fmt.PARAMETERS, {}))
self.param_defaults.update(
env_snippet.get(env_fmt.PARAMETER_DEFAULTS, {}))
self._update_event_sinks(env_snippet.get(env_fmt.EVENT_SINKS, []))
def user_env_as_dict(self):
"""Get the environment as a dict, ready for storing in the db."""
return {env_fmt.RESOURCE_REGISTRY: self.registry.as_dict(),
env_fmt.PARAMETERS: self.params,
env_fmt.PARAMETER_DEFAULTS: self.param_defaults,
env_fmt.ENCRYPTED_PARAM_NAMES: self.encrypted_param_names,
env_fmt.EVENT_SINKS: self._event_sinks}
def register_class(self, resource_type, resource_class, path=None):
self.registry.register_class(resource_type, resource_class, path=path)
def register_constraint(self, constraint_name, constraint):
self.constraints[constraint_name] = constraint
def register_stack_lifecycle_plugin(self, stack_lifecycle_name,
stack_lifecycle_class):
self.stack_lifecycle_plugins.append((stack_lifecycle_name,
stack_lifecycle_class))
def register_event_sink(self, event_sink_name, event_sink_class):
self.event_sink_classes[event_sink_name] = event_sink_class
def get_class(self, resource_type, resource_name=None, files=None):
return self.registry.get_class(resource_type, resource_name,
files=files)
def get_class_to_instantiate(self, resource_type, resource_name=None):
return self.registry.get_class_to_instantiate(resource_type,
resource_name)
def get_types(self,
cnxt=None,
support_status=None,
type_name=None,
version=None,
with_description=False):
return self.registry.get_types(cnxt,
support_status=support_status,
type_name=type_name,
version=version,
with_description=with_description)
def get_resource_info(self, resource_type, resource_name=None,
registry_type=None, ignore=None):
return self.registry.get_resource_info(resource_type, resource_name,
registry_type, ignore=ignore)
def get_constraint(self, name):
return self.constraints.get(name)
def get_stack_lifecycle_plugins(self):
return self.stack_lifecycle_plugins
def _update_event_sinks(self, sinks):
self._event_sinks.extend(sinks)
for sink in sinks:
sink = sink.copy()
sink_class = sink.pop('type')
sink_class = self.event_sink_classes[sink_class]
self._built_event_sinks.append(sink_class(**sink))
def get_event_sinks(self):
return self._built_event_sinks
def get_child_environment(parent_env, child_params, item_to_remove=None,
child_resource_name=None):
"""Build a child environment using the parent environment and params.
This is built from the child_params and the parent env so some
resources can use user-provided parameters as if they come from an
environment.
1. resource_registry must be merged (child env should be loaded after the
parent env to take precedence).
2. child parameters must overwrite the parent's as they won't be relevant
in the child template.
If `child_resource_name` is provided, resources in the registry will be
replaced with the contents of the matching child resource plus anything
that passes a wildcard match.
"""
def is_flat_params(env_or_param):
if env_or_param is None:
return False
for sect in env_fmt.SECTIONS:
if sect in env_or_param:
return False
return True
child_env = parent_env.user_env_as_dict()
child_env[env_fmt.PARAMETERS] = {}
flat_params = is_flat_params(child_params)
new_env = Environment()
if flat_params and child_params is not None:
child_env[env_fmt.PARAMETERS] = child_params
new_env.load(child_env)
if not flat_params and child_params is not None:
new_env.load(child_params)
if item_to_remove is not None:
new_env.registry.remove_item(item_to_remove)
if child_resource_name:
new_env.registry.remove_resources_except(child_resource_name)
return new_env
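# Hedged usage sketch (illustrative only, not part of the upstream Heat source): building a
# child environment from a parent environment plus flat user parameters. The parameter
# names are made up for the example.
def _example_child_environment():
    parent = Environment({env_fmt.PARAMETERS: {'flavor': 'm1.small'}}, user_env=False)
    child = get_child_environment(parent, {'image': 'cirros'})
    return child.params  # {'image': 'cirros'}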
def read_global_environment(env, env_dir=None):
if env_dir is None:
cfg.CONF.import_opt('environment_dir', 'heat.common.config')
env_dir = cfg.CONF.environment_dir
try:
env_files = glob.glob(os.path.join(env_dir, '*'))
except OSError as osex:
LOG.error(_LE('Failed to read %s'), env_dir)
LOG.exception(osex)
return
for file_path in env_files:
try:
with open(file_path) as env_fd:
LOG.info(_LI('Loading %s'), file_path)
env_body = env_fmt.parse(env_fd.read())
env_fmt.default_for_missing(env_body)
env.load(env_body)
except ValueError as vex:
LOG.error(_LE('Failed to parse %(file_path)s'), {
'file_path': file_path})
LOG.exception(vex)
except IOError as ioex:
LOG.error(_LE('Failed to read %(file_path)s'), {
'file_path': file_path})
LOG.exception(ioex)
|
py | b40688bf3217f87b535c87e18d559b6973ba3714 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Packages of Christophe
from datetime import datetime
import time
import json
import math
import os, sys
import socket
import traceback
import urllib2 as urllib
import os.path
user = "GW3"
test = True
# True to run the code locally
# False to run the code on the server
# 1) Ensure to run in the user home directory
# !!! MUST NOT BE CHANGED !!!
if test:
host = "greenwall.gembloux.uliege.be"
else:
host = "localhost"
# Ensure to run in the user home directory
DIR_BASE = os.path.expanduser("~")
if not os.path.samefile(os.getcwd(), DIR_BASE):
os.chdir(DIR_BASE)
print(os.getcwd())
# 2)Ensure to be the only instance to run
# !!! MUST NOT BE CHANGED !!!
# Explanation: if another instance of this user's program is already running, this one exits instead of starting a duplicate
pid = str(os.getpid())
_lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
_lock_socket.bind('\0' + user)
print('Socket ' + user + ' now locked for process #' + pid)
# Make the current pid available to be able to kill the process...
open("pid.txt", 'w').write(pid)
except socket.error:
current = open("pid.txt", 'r').read()
print(user + ' lock exists for process #' + current + " : may be you should ./clean.sh !")
sys.exit()
# 3) Date determination
# !!! MUST NOT BE CHANGED !!!
# Explanation: EPOCH time is the number of seconds since 1/1/1970
def get_timestamp():
return int(time.time())
# Transform an EPOCH time in a lisible date (for Grafana)
def formatDate(epoch):
dt = datetime.fromtimestamp(epoch)
return dt.isoformat()
# Convert an EPOCH time into a readable date (for Grafana)
def formatDateGMT(epoch):
dt = datetime.fromtimestamp(epoch - (2 * 60 * 60)) # We are in summer and in Belgium !
return dt.isoformat()
delimiters = ' \t\n\r\"\''
# 4) Getting the list of all available sensors
# !!! MUST NOT BE CHANGED !!!
dataFile = None
try: # urlopen not usable with "with"
url = "http://" + host + "/api/grafana/search"
dataFile = urllib.urlopen(url, json.dumps(""), 20)
result = json.load(dataFile)
#for index in result:
#print(index)
except:
print(u"URL=" + (url if url else "") + \
u", Message=" + traceback.format_exc())
if dataFile:
dataFile.close()
# 5) Irrigation scheme: collecting sensor readings, taking a decision to irrigate or not
# and sending the instructions to the valves
# !!! THIS IS WHERE WE MAKE CHANGES !!!
"""
Objective: your program must create a data file with one line per decision, containing the
Linux EPOCH time and your valve state (0=closed, 1=opened)
"""
while (True):
# __________________________________________________________________
# a. reading all values of the last 5 minutes (5 minutes of 60 seconds)
"""
sensors' names:
- HUM7 : first humidity sensor [V]
- HUM8 : second humidity sensor [V]
- HUM9 : third humidity sensor [V]
- SDI11 : humidity sensor temperature [°C]
"""
dataFile = None
try: # urlopen not usable with "with"
url = "http://" + host + "/api/grafana/query"
now = get_timestamp()
gr = {'range': {'from': formatDateGMT(now - (1 * 5 * 60)), 'to': formatDateGMT(now)}, \
'targets': [{'target': 'HUM7'}, {'target': 'HUM8'}, {'target': 'HUM9'}, {'target': 'SDI11'}]}
data = json.dumps(gr)
#print(data)
dataFile = urllib.urlopen(url, data, 20)
result = json.load(dataFile)
if result:
#print(result)
for target in result:
# print target
index = target.get('target')
for datapoint in target.get('datapoints'):
value = datapoint[0]
stamp = datapoint[1] / 1000
#print(index + ": " + formatDate(stamp) + " = " + str(value))
except:
print(u"URL=" + (url if url else "") + \
u", Message=" + traceback.format_exc())
if dataFile:
dataFile.close()
# ________________________________________________________________________
# b. Choose to use Plan A or not
# ---------------------------------------------------------------------------
# 5.1) Parameters
# Acceptable standard deviation
std_threshold = 0.03 # Humidity sensor uncertainty[-]
# --------------------------------------------------------------------------
# 5.2) Check for NaN values
# Build lists
Vraw7 = []
Vraw8 = []
Vraw9 = []
length_result = len(result[0].get('datapoints'))
for i in range(0, length_result):
Vraw7.append(result[0].get('datapoints')[i][0])
Vraw8.append(result[1].get('datapoints')[i][0])
Vraw9.append(result[2].get('datapoints')[i][0])
print (
"""####################################
Sensor readings
####################################"""
)
print 'HUM7 [V]:', Vraw7
print 'HUM8 [V]:', Vraw8
print 'HUM9 [V]:', Vraw9
# Find NaN values
Vraw7_NaN = []
Vraw8_NaN = []
Vraw9_NaN = []
for i in range(0, length_result):
Vraw7_NaN.append(math.isnan(Vraw7[i]))
Vraw8_NaN.append(math.isnan(Vraw8[i]))
        Vraw9_NaN.append(math.isnan(Vraw9[i]))
print (
"""####################################
Presence of NaN values
####################################"""
)
print 'HUM7:', Vraw7_NaN.count(True)
print 'HUM8:', Vraw8_NaN.count(True)
print 'HUM9:', Vraw9_NaN.count(True)
# --------------------------------------------------------------------------
# 5.3). Check for outliers
# build function
def detect_outlier(list_data, threshold):
length_list = len(list_data)
# mean
mean = math.fsum(list_data)/length_list # Compute mean
# standard deviation
var = 0 # Initialize variance
for j in range(0, length_list):
            var += (list_data[j] - mean) ** 2 / length_list  # Compute variance
std = math.sqrt(var) # Compute standard deviation
outliers = [] # Initialize list of outliers
for y in list_data: # Loop on data
z_score = (y - mean) / std # Compute z-score
if abs(z_score) > threshold: # z-score compared to a threshold
outliers.append(y) # y considered as an outlier
return outliers
# Build lists of outliers
Vraw7_outliers = detect_outlier(Vraw7, 3)
Vraw8_outliers = detect_outlier(Vraw8, 3)
Vraw9_outliers = detect_outlier(Vraw9, 3)
# Compute number of outliers per list
Vraw7_NbOut = len(Vraw7_outliers)
Vraw8_NbOut = len(Vraw8_outliers)
Vraw9_NbOut = len(Vraw9_outliers)
print (
"""####################################
Presence of outliers
####################################"""
)
print 'Method: z-scores'
print 'HUM7:', Vraw7_NbOut
print 'HUM8:', Vraw8_NbOut
print 'HUM9:', Vraw9_NbOut
# --------------------------------------------------------------------------
# 5.4) Compute standard deviation
# mean function
def std(list_data):
length_list = len(list_data)
# mean
mean = math.fsum(list_data)/length_list # Compute mean
# standard deviation
var = 0 # Initialize variance
for j in range(0, length_list):
            var += (list_data[j] - mean) ** 2 / length_list  # Compute variance
        std = math.sqrt(var) / mean  # Compute relative standard deviation (coefficient of variation)
return std
std7 = std(Vraw7)
std8 = std(Vraw8)
std9 = std(Vraw9)
print(
"""####################################
Standard deviation
####################################"""
)
print 'Threshold [-]:',std_threshold
print 'HUM7:', std7
print 'HUM8:', std8
print 'HUM9:', std9
# --------------------------------------------------------------------------
# 5.5) Can Plan A be used?
# 5.5.1) Check conditions for each sensor
conditionA = [] # List with 1 if OK and 0 if not OK
print (
"""####################################
Are sensor's readings usable?
####################################"""
)
# HUM7
if (
all(x == False for x in Vraw7_NaN) and # No NaN values
(std7 < std_threshold) and # Standard deviation < threshold
Vraw7_NbOut == 0 # No outliers
):
conditionA.append(1)
print 'HUM7 can be used'
else:
conditionA.append(0)
print 'HUM7 can not be used'
# HUM8
if (
all(x == False for x in Vraw8_NaN) and # No NaN values
(std8 < std_threshold) and # Standard deviation < threshold
Vraw8_NbOut == 0 # No outliers
):
conditionA.append(1)
print 'HUM8 can be used'
else:
conditionA.append(0)
print 'HUM8 can not be used'
# HUM9
if (
all(x == False for x in Vraw9_NaN) and # No NaN values
(std9 < std_threshold) and # Standard deviation < threshold
Vraw9_NbOut == 0 # No outliers
):
conditionA.append(1)
print 'HUM9 can be used'
else:
conditionA.append(0)
print 'HUM9 can not be used'
    # 5.5.2) Choose to use humidity sensors or not
NbHumMin = 2 # Minimal number of operating humidity sensor
if conditionA.count(1) >= NbHumMin:
print("Plan A can be run")
timestamp = get_timestamp()
if os.path.isfile('filename.txt'):
print ("File exist")
# erase the current file and open the valve in 30 seconds
open("filename.txt", 'a').write(str(timestamp) + ";A\n")
else:
print ("File not exist")
file("filename.txt","w+")
open("filename.txt", 'a').write(str(timestamp) + ";A\n")
        # Irrigate using only the sensors whose conditionA entry is 1 (operating sensors)
else:
print("Go to plan B")
timestamp = get_timestamp()
if os.path.isfile('filename.txt'):
print ("File exist")
# erase the current file and open the valve in 30 seconds
open("filename.txt", 'a').write(str(timestamp) + ";B\n")
else:
print ("File not exist")
file("filename.txt", "w+")
open("filename.txt", 'a').write(str(timestamp) + ";B\n")
# sleep for 24 hours (in seconds)
time.sleep(24 * 60 * 60)
|
py | b4068912c8bb234eff54d6b4feae499f7e8ab30c | import warnings
import torch
import torch.nn.functional as F
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
            if output_h > input_h or output_w > input_w:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
'the output would more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
if isinstance(size, torch.Size):
size = tuple(int(x) for x in size)
return F.interpolate(input, size, scale_factor, mode, align_corners)
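# Hedged usage sketch (an assumption, not part of the original module): upsampling a feature
# map to a target spatial size, as is typical in segmentation decoders.
if __name__ == '__main__':
    feats = torch.randn(1, 8, 32, 32)
    out = resize(feats, size=(64, 64), mode='bilinear', align_corners=False)
    print(out.shape)  # torch.Size([1, 8, 64, 64])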
|
py | b406892b7367c279ec392aea354595ef4a3ea16a | """integrating_vue URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path, re_path
from app_one import views as appone_views
from app_two import views as apptwo_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', appone_views.index, name="root_one_index"),
path('appone/', appone_views.index, name="one_index"),
path('apptwo/', apptwo_views.index, name="two_index"),
]
# In development, proxy hot-update requests to webpack-dev-server since they can't be served by Django
if settings.DEBUG:
try:
from revproxy.views import ProxyView
except ImportError:
pass
else:
from revproxy import utils
# responses bigger than MIN_STREAMING_LENGTH are streamed, breaking Webpack dev server
# We monkey patch it to a big enough value, here 256MB
utils.MIN_STREAMING_LENGTH = 256 * 1024 * 1024 # noqa
urlpatterns += [
re_path(r'(?P<path>.*\.hot-update\..*)$',
ProxyView.as_view(upstream=settings.WEBPACK_DEVSERVER_URL),
name='hotreload_proxy'),
]
|
py | b40689f474871344d31144dafaf33fd1f3c15d12 | """Commit parser helpers
"""
from typing import Tuple
def parse_text_block(text: str) -> Tuple[str, str]:
"""
This will take a text block and return a tuple with body and footer,
where footer is defined as the last paragraph.
:param text: The text string to be divided.
:return: A tuple with body and footer,
where footer is defined as the last paragraph.
"""
body, footer = '', ''
if text:
body = text.split('\n\n')[0]
if len(text.split('\n\n')) == 2:
footer = text.split('\n\n')[1]
return body.replace('\n', ' '), footer.replace('\n', ' ')
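# Hedged usage sketch (not part of the original module): splitting a commit message body
# from its footer paragraph.
if __name__ == '__main__':
    body, footer = parse_text_block('Fix the commit parser.\n\nCloses #12')
    assert body == 'Fix the commit parser.'
    assert footer == 'Closes #12'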
|
py | b4068a85ea4acc2ae467e754ba213ff0627c881a | import numpy as np
from emma.utils.utils import EMMAException, int_to_one_hot, bytearray_to_many_hot
from emma.attacks.leakagemodels import LeakageModel
class AIInputType:
"""
Class that defines all possible types of inputs for the ML models. Input classes must have an attribute
'input_type' with one of the values defined in this class.
"""
SIGNAL = 'signal'
SIGNAL_PLAINTEXT = 'signal_plaintext'
SIGNAL_PLAINTEXT_OH = 'signal_plaintext_oh'
SIGNAL_PLAINTEXT_MH = 'signal_plaintext_mh'
# For testing purposes
SIGNAL_KEY = 'signal_key'
SIGNAL_PLAINTEXT_KEY = 'signal_plaintext_key'
PLAINTEXT_KEY = 'plaintext_key'
PLAINTEXT_KEY_OH = 'plaintext_key_oh'
SIGNAL_LEAKAGE = 'signal_leakage'
RANDOM = 'random'
@classmethod
def choices(cls):
"""
Get all possible AIInputTypes in list form
:return:
"""
c = []
for k, v in cls.__dict__.items():
if k[:2] != '__' and type(v) is str:
c.append(v)
return c
class AIInputMeta(type):
"""
Metaclass used for checking whether the child class contains a valid input_type attribute.
"""
class BadAIInputClassException(EMMAException):
pass
class InvalidInputTypeException(EMMAException):
pass
def __new__(mcs, name, bases, class_dict):
if bases != (object,): # Do not validate LeakageModel class
if 'input_type' not in class_dict:
raise AIInputMeta.BadAIInputClassException
if class_dict['input_type'] not in AIInputType.choices():
raise AIInputMeta.InvalidInputTypeException
return type.__new__(mcs, name, bases, class_dict)
class AIInput(object, metaclass=AIInputMeta):
"""
AI input base class.
"""
class UnknownAIInputException(EMMAException):
pass
def __new__(cls, conf):
"""
Called when instantiating an AIInput object. Returns an instance of the appropriate class depending on the
input_type parameter.
:param conf:
:return:
"""
for subclass in cls._get_subclasses():
if subclass.input_type == conf.input_type:
return object.__new__(subclass) # Avoid recursion by calling object.__new__ instead of cls.__new__
raise AIInput.UnknownAIInputException
def __init__(self, conf):
self.conf = conf
@classmethod
def _get_subclasses(cls):
for subclass in cls.__subclasses__():
if cls is not object:
for subsubclass in subclass._get_subclasses(): # Also yield children of children
yield subsubclass
yield subclass
def get_trace_inputs(self, trace):
raise NotImplementedError
def get_trace_set_inputs(self, trace_set):
"""
Givem a trace set, returns inputs suitable for training an AI model.
:param trace_set:
:return:
"""
inputs = []
for trace in trace_set.traces:
inputs.append(self.get_trace_inputs(trace))
result = np.array(inputs)
# CNNs expect a channels dimension
if self.conf.cnn:
result = np.expand_dims(result, axis=-1)
return result
class SignalAIInput(AIInput):
input_type = AIInputType.SIGNAL
def get_trace_inputs(self, trace):
return trace.signal
class SignalPlaintextAIInput(AIInput):
input_type = AIInputType.SIGNAL_PLAINTEXT
def get_trace_inputs(self, trace):
return np.concatenate((trace.signal, trace.plaintext))
class SignalPlaintextMHAIInput(AIInput):
input_type = AIInputType.SIGNAL_PLAINTEXT_MH
def get_trace_inputs(self, trace):
return np.concatenate((trace.signal, bytearray_to_many_hot(trace.plaintext)))
class SignalPlaintextOHAIInput(AIInput):
input_type = AIInputType.SIGNAL_PLAINTEXT_OH
def get_trace_inputs(self, trace):
result = []
for p in trace.plaintext:
result.append(int_to_one_hot(p, 256))
result = np.concatenate(result)
return np.concatenate((trace.signal, result))
class SignalKeyAIInput(AIInput):
input_type = AIInputType.SIGNAL_KEY
def get_trace_inputs(self, trace):
return np.concatenate((trace.signal, trace.key))
class SignalPlaintextKeyAIInput(AIInput):
input_type = AIInputType.SIGNAL_PLAINTEXT_KEY
def get_trace_inputs(self, trace):
return np.concatenate((trace.signal, trace.plaintext, trace.key))
class PlaintextKeyAIInput(AIInput):
input_type = AIInputType.PLAINTEXT_KEY
def get_trace_inputs(self, trace):
return np.concatenate((trace.plaintext, trace.key))
class PlaintextKeyOHAIInput(AIInput):
input_type = AIInputType.PLAINTEXT_KEY_OH
def get_trace_inputs(self, trace):
result = []
for p in trace.plaintext:
result.append(int_to_one_hot(p, 256))
for k in trace.key:
result.append(int_to_one_hot(k, 256))
return np.concatenate(result)
class SignalLeakageAIInput(AIInput):
input_type = AIInputType.SIGNAL_LEAKAGE
def __init__(self, conf):
super().__init__(conf)
self.leakage_model = LeakageModel(conf)
def get_trace_inputs(self, trace):
leakages = []
for k in range(16):
leakage = self.leakage_model.get_trace_leakages(trace, k)
if isinstance(leakage, list) or isinstance(leakage, np.ndarray):
leakages.extend(list(leakage))
else:
leakages.append(leakage)
leakages = np.array(leakages)
return np.concatenate((trace.signal, leakages))
class RandomInput(AIInput):
input_type = AIInputType.RANDOM
def get_trace_inputs(self, trace):
return np.random.uniform(0.0, 1.0, len(trace.signal))
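# Hedged usage sketch (assumption: a minimal stand-in for the EMMA configuration object,
# which normally carries many more settings than input_type and cnn):
if __name__ == '__main__':
    from types import SimpleNamespace
    demo_conf = SimpleNamespace(input_type=AIInputType.SIGNAL, cnn=False)
    ai_input = AIInput(demo_conf)  # __new__ dispatches to SignalAIInput
    demo_trace = SimpleNamespace(signal=np.arange(4, dtype=float))
    print(ai_input.get_trace_inputs(demo_trace))  # the raw signal is returned unchanged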
|
py | b4068bdca82aad91e28eb75d2d04a99007edf1bc | from nanopores import *
from checksolve import check_solve
geo_name = "H_geo"
x0 = [0.0, 0.0, 0.0e-9]
# 2D with molecule
generate_mesh(2.0, geo_name, x0=x0)
geo = geo_from_name(geo_name, x0=x0)
p = PNPSAxisym(geo)
check_solve(p)
|
py | b4068d75a75f2f29da9a9586874b6ae47d90c257 | """This contains the configuration of the Singleton application."""
# Django Imports
from django.apps import AppConfig
class SingletonConfig(AppConfig):
name = "ghostwriter.singleton"
def ready(self):
try:
import ghostwriter.singleton.signals # noqa F401 isort:skip
except ImportError:
pass
|
py | b4068d88400b369e07171724a4d83b814d8c3978 | from prisma import Prisma, Base64
async def filtering(client: Prisma) -> None:
# case: all valid filter fields
await client.types.find_first(
where={
'bytes': Base64.encode(b'foo'),
},
)
await client.types.find_first(
where={
'bytes': {
'equals': Base64.encode(b'a'),
},
},
)
await client.types.find_first(
where={
'bytes': {
'not': Base64.encode(b'a'),
},
},
)
await client.types.find_first(
where={
'bytes': {
'not': {
'equals': Base64.encode(b'a'),
},
},
},
)
# case: invalid types
await client.types.find_first(
where={ # E: Argument of type "dict[str, bytes]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bytes': b'foo',
},
)
await client.types.find_first(
where={ # E: Argument of type "dict[str, bytes]" cannot be assigned to parameter "where" of type "TypesWhereInput | None" in function "find_first"
'bytes': b'foo',
},
)
|
py | b4068db31cce94159fc9d591723ee0dcc68ea265 | import discord
import os
import time
## ended
## looping
import discord
import os
import time
## ended
|
py | b4069018b591fed93045bd1036c6f83154831aef | from __future__ import annotations
import random
import string
from typing import Union, Generator, Callable, SupportsRound, Any
class IterationNotCompleted(BaseException):
"""Generator stopped iteration. Make sure it iterates over all arrays length"""
pass
class UndefinedVariable(Exception):
"""Exception raised for UndefinedVariable"""
def __init__(self, variable):
self.message = f"Variable {variable} not defined."
class BoundingError(Exception):
"""Exception raised for BoundingError"""
def __init__(self, lower_bound, upper_bound):
self.message = f"Lower bound less than upper bound." + \
f" lower bound: {lower_bound}, upper bound: {upper_bound}"
super(BoundingError, self).__init__(self.message)
class Variables:
"""Simple Base class for all variables
:kwargs
-------
* **generator**: ``Callable[..., Generator]``
A Callable function that returns a Generator used to generate variable.
* **decimal_places**: ``int``
Rounds generated variables, **default**: ``no rounding``
"""
def __init__(self, *args, **kwargs):
self.args = args
self.generator = kwargs.get('generator')
self.decimal_places = kwargs.get('decimal_places')
# last will used to access last generated variable
# in case of use Variable in other generators as parameters
self.last = None
# Initialize
self.next()
def rounder(self, val):
if self.decimal_places and isinstance(val, SupportsRound):
self.last = round(val, self.decimal_places)
else:
self.last = val
return self.last
def next(self):
tmp_args = [x if not isinstance(x, (IntVar, FloatVar)) else x.last for x in self.args]
self.last = self.rounder(self.generator(*tmp_args))
return self.last
class BoundedVar(Variables):
"""A simple wrapper for variables that has bounding option.
:raise *BoundingError* if bounding is wrong.
"""
def __init__(self, lower_bound, upper_bound, *args, **kwargs):
tmp_upper = upper_bound if not isinstance(upper_bound, Variables) else upper_bound.last
tmp_lower = lower_bound if not isinstance(lower_bound, Variables) else lower_bound.last
if tmp_upper < tmp_lower:
raise BoundingError(lower_bound, upper_bound)
super().__init__(lower_bound, upper_bound, *args, **kwargs)
class IntVar(BoundedVar):
"""Generates random random integer between lower and upper bound
using random.randint callable.
"""
def __init__(self, lower_bound: Union[IntVar, int], upper_bound: Union[IntVar, int], **kwargs):
super().__init__(lower_bound, upper_bound, generator=random.randint, **kwargs)
class FloatVar(BoundedVar):
"""Generates random random float between lower and upper bound
using random.uniform callable.
"""
def __init__(self, lower_bound: Union[float, int, IntVar, FloatVar],
upper_bound: Union[float, int, IntVar, FloatVar], **kwargs):
super().__init__(lower_bound, upper_bound, generator=random.uniform, **kwargs)
class Collections(Variables):
"""A base class for all collection type variables.
    Use CustomArray() instead if you want to build custom arrays with your own generator.
"""
def __init__(self, *args, **kwargs):
self.length = kwargs.get('length') if kwargs.get('length') else 1
super(Collections, self).__init__(*args, **kwargs)
def next(self):
# Using temp args to get current args Variable if they are
# [:Variable:] for current generation,
tmp_args = [x if not isinstance(x, (IntVar, FloatVar)) else x.last for x in self.args]
tmp_length = self.length if not isinstance(self.length, IntVar) else self.length.last
# not using temp args/length will cause to set arguments as a not
# changeable integer for next generations.
return [self.rounder(self.generator(*tmp_args)) for _ in range(tmp_length)]
class CustomArray(Collections):
"""A class to build custom arrays using a Generator."""
# The difference with :Collections: class is :CustomArray: gets a Callable[..., Generator]
# that yields each member for a generation, But Collection uses a generator
# that returns each member of array(e.g random.randint).
def __init__(self, length: Union[int, IntVar], generator: Callable[..., Generator[Any, Any, Any]], *args, **kwargs):
super().__init__(*args, generator=generator, length=length, **kwargs)
def next(self):
tmp_args = [x if not isinstance(x, (IntVar, FloatVar)) else x.last for x in self.args]
tmp_length = self.length if not isinstance(self.length, IntVar) else self.length.last
# Making a generator from Callable[..., Generator] function for each generation
gen = self.generator(*tmp_args)
try:
self.last = [self.rounder(next(gen)) for _ in range(tmp_length)]
except StopIteration:
raise IterationNotCompleted("\nGenerator stopped iteration. Make sure it iterates over all arrays length.")
return self.last
class IntArray(Collections):
""""Generates random integer for each member of array using random.randint generator"""
def __init__(self, lower_bound: Union[IntVar, int], upper_bound: Union[IntVar, int],
length: Union[IntVar, int]):
super().__init__(lower_bound, upper_bound, length=length, generator=random.randint, decimal_places=0)
class Array2d(Collections):
""""Generates random integer for each member of array using random.randint generator"""
def __init__(self, array: Union[IntArray, FloatArray, CustomArray, Array2d], length: Union[IntVar, int]):
self.array = array
super().__init__(length=length)
def next(self):
tmp_args = [x if not isinstance(x, (IntVar, FloatVar)) else x.last for x in self.args]
tmp_length = self.length if not isinstance(self.length, IntVar) else self.length.last
self.last = [self.array.next() for _ in range(tmp_length)]
return self.last
class FloatArray(Collections):
""""Generates random float for each member of array using random.uniform generator"""
def __init__(self, lower_bound: Union[int, float, IntVar, FloatVar],
upper_bound: Union[int, float, IntVar, FloatVar],
length: Union[int, IntVar], decimal_places: int = 1):
super().__init__(lower_bound, upper_bound, length=length, generator=random.uniform,
decimal_places=decimal_places)
class ChoiceList(Collections):
"""Generates random choice from given list with random.choice generator"""
def __init__(self, length: Union[int, IntVar], choice_list: list, *args, **kwargs):
super().__init__(choice_list, *args, generator=random.choice, length=length, **kwargs)
class CharArray(ChoiceList):
""""Generates random choice from all available english characters"""
def __init__(self, length: Union[int, IntVar]):
super().__init__(length, string.ascii_letters)
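# Minimal usage sketch (hedged; based only on the classes defined above):
#   n = IntVar(1, 5)                  # random integer in [1, 5]
#   xs = IntArray(0, 100, length=n)   # list of n.last integers in [0, 100]
#   grid = Array2d(xs, length=3)      # 3 rows, each a fresh draw from xs
#   chars = CharArray(8)              # 8 random ASCII letters
#   chars.next()                      # e.g. ['q', 'Z', 'a', 'f', 'R', 'b', 'M', 'x']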
|
py | b4069049100d4b4ead8a11694f70bd8c7554bcd1 | from setuptools import setup, find_packages
version = "1.4.2"
with open("README.md", "r", encoding="utf-8") as readme_file:
long_description = readme_file.read()
# with open("requirements.txt", "r", encoding="utf-8") as req_file:
# requirements = req_file.readlines()
setup(
name="vscode-ext",
version=version,
description="Create VSCode Extensions with python",
long_description=long_description,
long_description_content_type="text/markdown",
author="Swas.py",
author_email="[email protected]",
packages=find_packages(),
include_package_data=True,
url = "https://github.com/CodeWithSwastik/vscode-ext",
project_urls={
"Issue tracker": "https://github.com/CodeWithSwastik/vscode-ext/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
install_requires=[],
python_requires=">=3.6",
)
|
py | b4069216014099b0a16d088e7ee9654a724f5e01 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtCore import Qt, QItemSelectionModel
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont
from PyQt5.QtWidgets import QAbstractItemView
from PyQt5.QtWidgets import QHeaderView, QMenu, QVBoxLayout, QGridLayout, QLabel, QTreeWidget, QTreeWidgetItem
from electrum.i18n import _
from electrum.util import format_time, PR_UNPAID, PR_PAID, PR_INFLIGHT
from electrum.util import get_request_status
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum.lnutil import format_short_channel_id
from electrum.bitcoin import COIN
from electrum import constants
from .util import (MyTreeView, read_QIcon, MONOSPACE_FONT,
import_meta_gui, export_meta_gui, pr_icons)
from .util import CloseButton, Buttons
from .util import WindowModalDialog
ROLE_REQUEST_TYPE = Qt.UserRole
ROLE_REQUEST_ID = Qt.UserRole + 1
class InvoiceList(MyTreeView):
class Columns(IntEnum):
DATE = 0
DESCRIPTION = 1
AMOUNT = 2
STATUS = 3
headers = {
Columns.DATE: _('Date'),
Columns.DESCRIPTION: _('Description'),
Columns.AMOUNT: _('Amount'),
Columns.STATUS: _('Status'),
}
filter_columns = [Columns.DATE, Columns.DESCRIPTION, Columns.AMOUNT]
def __init__(self, parent):
super().__init__(parent, self.create_menu,
stretch_column=self.Columns.DESCRIPTION,
editable_columns=[])
self.setSortingEnabled(True)
self.setModel(QStandardItemModel(self))
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.update()
def update_item(self, key, status):
req = self.parent.wallet.get_invoice(key)
if req is None:
return
model = self.model()
for row in range(0, model.rowCount()):
item = model.item(row, 0)
if item.data(ROLE_REQUEST_ID) == key:
break
else:
return
status_item = model.item(row, self.Columns.STATUS)
status, status_str = get_request_status(req)
if self.parent.wallet.lnworker:
log = self.parent.wallet.lnworker.logs.get(key)
if log and status == PR_INFLIGHT:
status_str += '... (%d)'%len(log)
status_item.setText(status_str)
status_item.setIcon(read_QIcon(pr_icons.get(status)))
def update(self):
_list = self.parent.wallet.get_invoices()
# filter out paid invoices unless we have the log
lnworker_logs = self.parent.wallet.lnworker.logs if self.parent.wallet.lnworker else {}
_list = [x for x in _list if x and x.get('status') != PR_PAID or x.get('rhash') in lnworker_logs]
self.model().clear()
self.update_headers(self.__class__.headers)
for idx, item in enumerate(_list):
invoice_type = item['type']
if invoice_type == PR_TYPE_LN:
key = item['rhash']
icon_name = 'lightning.png'
elif invoice_type == PR_TYPE_ONCHAIN:
key = item['id']
icon_name = 'bitcoin.png'
if item.get('bip70'):
icon_name = 'seal.png'
else:
raise Exception('Unsupported type')
status, status_str = get_request_status(item)
message = item['message']
amount = item['amount']
timestamp = item.get('time', 0)
date_str = format_time(timestamp) if timestamp else _('Unknown')
amount_str = self.parent.format_amount(amount, whitespaces=True)
labels = [date_str, message, amount_str, status_str]
items = [QStandardItem(e) for e in labels]
self.set_editability(items)
items[self.Columns.DATE].setIcon(read_QIcon(icon_name))
items[self.Columns.STATUS].setIcon(read_QIcon(pr_icons.get(status)))
items[self.Columns.DATE].setData(key, role=ROLE_REQUEST_ID)
items[self.Columns.DATE].setData(invoice_type, role=ROLE_REQUEST_TYPE)
self.model().insertRow(idx, items)
self.selectionModel().select(self.model().index(0,0), QItemSelectionModel.SelectCurrent)
# sort requests by date
self.model().sort(self.Columns.DATE)
# hide list if empty
if self.parent.isVisible():
b = self.model().rowCount() > 0
self.setVisible(b)
self.parent.invoices_label.setVisible(b)
self.filter()
def import_invoices(self):
import_meta_gui(self.parent, _('invoices'), self.parent.invoices.import_file, self.update)
def export_invoices(self):
export_meta_gui(self.parent, _('invoices'), self.parent.invoices.export_file)
def create_menu(self, position):
items = self.selected_in_column(0)
if len(items)>1:
keys = [ item.data(ROLE_REQUEST_ID) for item in items]
invoices = [ self.parent.wallet.get_invoice(key) for key in keys]
invoices = [ invoice for invoice in invoices if invoice['status'] == PR_UNPAID and invoice['type'] == PR_TYPE_ONCHAIN]
if len(invoices) > 1:
menu = QMenu(self)
menu.addAction(_("Pay multiple invoices"), lambda: self.parent.pay_multiple_invoices(invoices))
menu.exec_(self.viewport().mapToGlobal(position))
return
idx = self.indexAt(position)
item = self.model().itemFromIndex(idx)
item_col0 = self.model().itemFromIndex(idx.sibling(idx.row(), self.Columns.DATE))
if not item or not item_col0:
return
key = item_col0.data(ROLE_REQUEST_ID)
request_type = item_col0.data(ROLE_REQUEST_TYPE)
menu = QMenu(self)
self.add_copy_menu(menu, idx)
invoice = self.parent.wallet.get_invoice(key)
menu.addAction(_("Details"), lambda: self.parent.show_invoice(key))
if invoice['status'] == PR_UNPAID:
menu.addAction(_("Pay"), lambda: self.parent.do_pay_invoice(invoice))
if self.parent.wallet.lnworker:
log = self.parent.wallet.lnworker.logs.get(key)
if log:
menu.addAction(_("View log"), lambda: self.show_log(key, log))
menu.addAction(_("Delete"), lambda: self.parent.delete_invoice(key))
menu.exec_(self.viewport().mapToGlobal(position))
def show_log(self, key, log):
d = WindowModalDialog(self, _("Payment log"))
vbox = QVBoxLayout(d)
log_w = QTreeWidget()
log_w.setHeaderLabels([_('Route'), _('Channel ID'), _('Message'), _('Blacklist')])
for i, (route, success, failure_log) in enumerate(log):
route_str = '%d'%len(route)
if not success:
sender_idx, failure_msg, blacklist = failure_log
short_channel_id = route[sender_idx+1].short_channel_id
data = failure_msg.data
message = repr(failure_msg.code)
else:
short_channel_id = route[-1].short_channel_id
message = _('Success')
blacklist = False
chan_str = format_short_channel_id(short_channel_id)
x = QTreeWidgetItem([route_str, chan_str, message, repr(blacklist)])
log_w.addTopLevelItem(x)
vbox.addWidget(log_w)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
|
py | b40692593099b2b045a56632062e91474d413653 | #!/usr/bin/env python
"""t is for people that want do things, not organize their tasks."""
from __future__ import with_statement, print_function
import os, re, sys, hashlib, time
from operator import itemgetter
from optparse import OptionParser, OptionGroup
import json
class InvalidTaskfile(Exception):
"""Raised when the path to a task file already exists as a directory."""
pass
class AmbiguousPrefix(Exception):
"""Raised when trying to use a prefix that could identify multiple tasks."""
def __init__(self, prefix):
super(AmbiguousPrefix, self).__init__()
self.prefix = prefix
class UnknownPrefix(Exception):
"""Raised when trying to use a prefix that does not match any tasks."""
def __init__(self, prefix):
super(UnknownPrefix, self).__init__()
self.prefix = prefix
class BadFile(Exception):
"""Raised when something else goes wrong trying to work with the task file."""
def __init__(self, path, problem):
super(BadFile, self).__init__()
self.path = path
self.problem = problem
def _hash(text):
"""Return a hash of the given text for use as an id.
Currently SHA1 hashing is used. It should be plenty for our purposes.
"""
return hashlib.sha1((str(time.time()) + text).encode('utf-8')).hexdigest()
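# Illustrative (not from the original source): ids are 40-character hex digests,
# e.g. _hash('buy milk') might return '5a1b...'; the time.time() salt makes
# repeated calls with identical text produce different ids.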
def _task_from_taskline(taskline):
"""Parse a taskline (from a task file) and return a task.
A taskline should be in the format:
summary text ... | {json of metadata}
The task returned will be a dictionary such as:
{ 'id': <hash id>,
'text': <summary text>,
... other metadata ... }
A taskline can also consist of only summary text, in which case the id
and other metadata will be generated when the line is read. This is
supported to enable editing of the taskfile with a simple text editor.
"""
if taskline.strip().startswith('#'):
return None
elif '|' in taskline:
text, _, meta = taskline.partition('|')
task = json.loads(meta)
task['text'] = text.strip()
else:
text = taskline.strip()
task = { 'id': _hash(text), 'text': text }
if 'timestamp' not in task:
task['timestamp'] = 0
if 'show_full_id' not in task:
task['show_full_id'] = False
if 'parent_id' not in task:
task['parent_id'] = None
return task
def _tasklines_from_tasks(tasks):
"""Parse a list of tasks into tasklines suitable for writing."""
tasklines = []
textlen = max(map(lambda t: len(t['text']), tasks)) if tasks else 0
for task in tasks:
meta = dict(task)
# remove text as it isn't part of the metadata
del meta['text']
# don't add show_full_id if it is false
if 'show_full_id' in meta and not meta['show_full_id']:
del meta['show_full_id']
# don't add parent_id if it is None
if 'parent_id' in meta and meta['parent_id'] == None:
del meta['parent_id']
tasklines.append('%s | %s\n' % (task['text'].ljust(textlen), json.dumps(meta, sort_keys=True)))
return tasklines
def _prefixes(ids):
"""Return a mapping of ids to prefixes in O(n) time.
Each prefix will be the shortest possible substring of the ID that
can uniquely identify it among the given group of IDs.
If an ID of one task is entirely a substring of another task's ID, the
entire ID will be the prefix.
"""
ps = {}
for id in ids:
id_len = len(id)
for i in range(1, id_len+1):
# identifies an empty prefix slot, or a singular collision
prefix = id[:i]
if (not prefix in ps) or (ps[prefix] and prefix != ps[prefix]):
break
if prefix in ps:
# if there is a collision
other_id = ps[prefix]
for j in range(i, id_len+1):
if other_id[:j] == id[:j]:
ps[id[:j]] = ''
else:
ps[other_id[:j]] = other_id
ps[id[:j]] = id
break
else:
ps[other_id[:id_len+1]] = other_id
ps[id] = id
else:
# no collision, can safely add
ps[prefix] = id
ps = dict(zip(ps.values(), ps.keys()))
if '' in ps:
del ps['']
return ps
class TaskDict(object):
"""A set of tasks, both finished and unfinished, for a given list.
The list's files are read from disk when the TaskDict is initialized. They
can be written back out to disk with the write() function.
"""
def __init__(self, taskdir='.', name='tasks'):
"""Initialize by reading the task files, if they exist."""
self.tasks = {}
self.done = {}
self.name = name
self.taskdir = taskdir
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
if os.path.exists(path):
try:
with open(path, 'r') as tfile:
tls = [tl.strip() for tl in tfile if tl]
tasks = map(_task_from_taskline, tls)
for task in tasks:
if task is not None:
getattr(self, kind)[task['id']] = task
except IOError as e:
raise BadFile(path, e.strerror)
def __getitem__(self, prefix):
"""Return the unfinished task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
matched = [tid for tid in self.tasks.keys() if tid.startswith(prefix)]
if len(matched) == 1:
return self.tasks[matched[0]]
elif len(matched) == 0:
raise UnknownPrefix(prefix)
elif prefix in matched:
return self.tasks[prefix]
else:
raise AmbiguousPrefix(prefix)
def add_task(self, text, verbose, quiet, task_id = None, parent_id = None):
"""Add a new, unfinished task with the given summary text."""
if not task_id:
task_id = _hash(text)
show_full_id = False
else:
show_full_id = True
if parent_id:
parent = self[parent_id]
parent_id = parent['id']
timestamp = time.time()
self.tasks[task_id] = {'id': task_id, 'text': text, 'timestamp': timestamp}
if show_full_id:
self.tasks[task_id]['show_full_id'] = show_full_id
if parent_id:
self.tasks[task_id]['parent_id'] = parent_id
if not quiet:
if verbose or show_full_id:
print(task_id)
else:
prefixes = _prefixes(self.tasks)
print(prefixes[task_id])
def edit_task(self, prefix, text):
"""Edit the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
task = self[prefix]
if text.startswith('s/') or text.startswith('/'):
text = re.sub('^s?/', '', text).rstrip('/')
find, _, repl = text.partition('/')
text = re.sub(find, repl, task['text'])
task['text'] = text
if 'id' not in task:
task['id'] = _hash(text)
def add_tag(self, task, tag):
"""Add tag to the the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
if 'tags' in task:
task['tags'].append(tag)
else:
task['tags'] = [tag]
def remove_tag(self, task, tag):
"""Remove tag to the the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
if 'tags' in task:
task['tags'].remove(tag)
if len(task['tags']) == 0:
del task['tags']
def tag(self, prefix, tags):
"""Add (or remove) tag to the the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
task = self[prefix]
for tag in tags.strip().split(' '):
if not tag:
continue
elif tag[0] == '-':
self.remove_tag(task, tag[1:])
else:
self.add_tag(task, tag)
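    # Illustrative call (not from the original source):
    #   td.tag('3f', 'urgent -waiting')  # adds the "urgent" tag, removes "waiting"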
def children(self, task):
return [self.tasks[t] for t in self.tasks if 'parent_id' in self.tasks[t] and self.tasks[t]['parent_id'] == task['id']]
def num_children(self, task):
return len(self.children(task))
def finish_task(self, prefix, force = False):
"""Mark the task with the given prefix as finished.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
if not force and self.num_children(self[prefix]) > 0:
print('cannot finish task - it has open sub-tasks. use --force to override.\n')
return
task = self.tasks.pop(self[prefix]['id'])
self.done[task['id']] = task
for child in self.children(task):
self.finish_task(child['id'])
def remove_task(self, prefix):
"""Remove the task from tasks list.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
self.tasks.pop(self[prefix]['id'])
def print_list(self, kind='tasks', verbose=False, quiet=False, grep='', parent_id=None, indent=""):
"""Print out a nicely formatted list of unfinished tasks."""
tasks = dict(getattr(self, kind).items())
label = 'prefix' if not verbose else 'id'
if not verbose:
for task_id, prefix in _prefixes(tasks).items():
if tasks[task_id]['show_full_id']:
tasks[task_id]['prefix'] = task_id
else:
tasks[task_id]['prefix'] = prefix
plen = max(map(lambda t: len(t[label]), tasks.values())) if tasks else 0
for task in sorted(tasks.values(), key=lambda t:t['timestamp']):
if grep.lower() in task['text'].lower():
if parent_id == task['parent_id']:
num_str = "(%d) " % self.num_children(task)
p = '%s - ' % task[label].ljust(plen) if not quiet else ''
if 'tags' in task:
tags_str = " ".join(["[%s]" % tag for tag in task['tags']]) + " "
else:
tags_str = ""
print(indent + num_str + p + tags_str + task['text'])
self.print_list(kind, verbose, quiet, grep, task['id'], indent + " ")
def write(self, delete_if_empty=False):
"""Flush the finished and unfinished tasks to the files on disk."""
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
tasks = sorted(getattr(self, kind).values(), key=itemgetter('id'))
if tasks or not delete_if_empty:
try:
with open(path, 'w') as tfile:
for taskline in _tasklines_from_tasks(tasks):
tfile.write(taskline)
except IOError as e:
raise BadFile(path, e.strerror)
elif not tasks and os.path.isfile(path):
os.remove(path)
def _die(message):
sys.stderr.write('error: %s\n' % message)
sys.exit(1)
def _build_parser():
"""Return a parser for the command-line interface."""
usage = "Usage: %prog [-t DIR] [-l LIST] [options] [TEXT]"
parser = OptionParser(usage=usage)
actions = OptionGroup(parser, "Actions",
"If no actions are specified the TEXT will be added as a new task.")
actions.add_option("-a", "--add", dest="add", default="",
help="add TASK with TEXT", metavar="TASK")
actions.add_option("-e", "--edit", dest="edit", default="",
help="edit TASK to contain TEXT", metavar="TASK")
actions.add_option("-f", "--finish", dest="finish",
help="mark TASK as finished", metavar="TASK")
actions.add_option("-r", "--remove", dest="remove",
help="Remove TASK from list", metavar="TASK")
actions.add_option("-s", "--sub", dest="sub",
help="add sub task to PARENT", metavar="PARENT")
actions.add_option("-x", "--tag", dest="tag",
help="add tag to TASK", metavar="TASK")
actions.add_option("--force",
action="store_true", dest="force", default=False,
help="used to force an action even if it is not recommended")
parser.add_option_group(actions)
config = OptionGroup(parser, "Configuration Options")
config.add_option("-l", "--list", dest="name", default="tasks",
help="work on LIST", metavar="LIST")
config.add_option("-t", "--task-dir", dest="taskdir", default="",
help="work on the lists in DIR", metavar="DIR")
config.add_option("-d", "--delete-if-empty",
action="store_true", dest="delete", default=False,
help="delete the task file if it becomes empty")
parser.add_option_group(config)
output = OptionGroup(parser, "Output Options")
output.add_option("-g", "--grep", dest="grep", default='',
help="print only tasks that contain WORD", metavar="WORD")
output.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print more detailed output (full task ids, etc)")
output.add_option("-q", "--quiet",
action="store_true", dest="quiet", default=False,
help="print less detailed output (no task ids, etc)")
output.add_option("--done",
action="store_true", dest="done", default=False,
help="list done tasks instead of unfinished ones")
parser.add_option_group(output)
return parser
def _main():
"""Run the command-line interface."""
(options, args) = _build_parser().parse_args()
td = TaskDict(taskdir=options.taskdir, name=options.name)
text = ' '.join(args).strip()
if '\n' in text:
_die('task text cannot contain newlines')
try:
if options.finish:
td.finish_task(options.finish, force=options.force)
td.write(options.delete)
elif options.remove:
            td.remove_task(options.remove)
td.write(options.delete)
elif options.edit:
td.edit_task(options.edit, text)
td.write(options.delete)
elif options.tag:
td.tag(options.tag, text)
td.write(options.delete)
elif text:
td.add_task(text, verbose=options.verbose, quiet=options.quiet, task_id=options.add, parent_id=options.sub)
td.write(options.delete)
else:
kind = 'tasks' if not options.done else 'done'
td.print_list(kind=kind, verbose=options.verbose, quiet=options.quiet,
grep=options.grep)
except AmbiguousPrefix:
e = sys.exc_info()[1]
_die('the ID "%s" matches more than one task' % e.prefix)
except UnknownPrefix:
e = sys.exc_info()[1]
_die('the ID "%s" does not match any task' % e.prefix)
except BadFile as e:
_die('%s - %s' % (e.problem, e.path))
if __name__ == '__main__':
_main()
|
py | b4069289fec750daea8d55ff7dc280b4c3fc1e33 | def soma(num1, num2):
return num1 + num2 |
py | b4069292212e0ff216d53c39be813abbced3d261 | import torch
import random
import torch.nn as nn
import numpy as np
def global_local_temporal_contrastive(lsr,gdr, temperature):
#lsr denotes local sparse-clip representation= representation of temporal slice of global clip
#gdr denotes global dense-clip representation= representation of global(pooled) feature of local clip
#lsr,gdr shape should be [BS,4,128]
similarity_matrix = torch.bmm(lsr, gdr.permute(0,2,1)) # [BS, 4, 4]
# print(similarity_matrix)
similarity_matrix = torch.cat((similarity_matrix, similarity_matrix.permute(0,2,1)),dim=0) # [BS*2, 4, 4]
# print()
# print(similarity_matrix)
similarity_matrix = similarity_matrix.view(-1,4) # [BS*8, 4]
# print()
# print(similarity_matrix)
# print()
sample_lab = [0,1,2,3]
label = []
for i in range(lsr.shape[0]*2):
label.extend(sample_lab)
label = torch.from_numpy(np.asarray(label)).long().cuda()
similarity_matrix /= temperature
loss = nn.functional.cross_entropy(similarity_matrix, label, reduction='sum')
return loss/ (2*lsr.shape[0])
if __name__ == '__main__':
BS = 40
emb_size = 128
lsr = nn.functional.normalize(torch.rand(BS,4, emb_size),dim=2).cuda()
gdr = nn.functional.normalize(torch.rand(BS,4, emb_size),dim=2).cuda()
loss = global_local_temporal_contrastive(lsr, gdr, 0.1)
print(f'Loss is {loss}')
|
py | b406935051028034305fefc76fb6f7c1d7fe34ab | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.hyperflex_cluster_storage_policy_all_of import HyperflexClusterStoragePolicyAllOf # noqa: E501
from intersight.rest import ApiException
class TestHyperflexClusterStoragePolicyAllOf(unittest.TestCase):
"""HyperflexClusterStoragePolicyAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHyperflexClusterStoragePolicyAllOf(self):
"""Test HyperflexClusterStoragePolicyAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.hyperflex_cluster_storage_policy_all_of.HyperflexClusterStoragePolicyAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b406935e145510932ee9c90eeb73c985f0acf35d | ../stage.py
|
py | b40694542ec2d37208d876220f24acf45823b0f9 | # Copyright 2010-2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Python 3 compatibility tools (PRIVATE).
We currently have lines like this under Python 2 in order
to use iterator based zip, map and filter:
from future_builtins import zip
There is no similar option for range yet, other than:
range = xrange
input = raw_input
or:
from __builtin__ import xrange as range
from __builtin__ import raw_input as input
Under Python 3 this imports need to be removed. Also, deliberate
importing of built in functions like open changes from Python 2:
from __builtin__ import open
to this under Python 3:
from builtins import open
Instead, we can do this under either Python 2 or 3:
from Bio._py3k import open
from Bio._py3k import zip
Once we drop support for Python 2, the whole of Bio._py3k will
go away.
"""
import sys
if sys.version_info[0] >= 3:
#Code for Python 3
from builtins import open, zip, map, filter, range, input
import codecs
#Lots of our Python 2 code uses isinstance(x, basestring)
#which after 2to3 becomes isinstance(x, str)
basestring = str
unicode = str
_bytes_to_string = lambda b: b.decode() # bytes to unicode string
_string_to_bytes = lambda s: s.encode() # unicode string to bytes
def _as_unicode(s):
"""Turn byte string or unicode string into a unicode string."""
if isinstance(s, str):
return s
#Assume it is a bytes string
#Note ISO-8859-1 aka Latin-1 preserves first 256 chars
return codecs.latin_1_decode(s)[0]
def _as_bytes(s):
"""Turn byte string or unicode string into a bytes string.
The Python 2 version returns a (byte) string.
"""
if isinstance(s, bytes):
return s
#Assume it is a unicode string
#Note ISO-8859-1 aka Latin-1 preserves first 256 chars
return codecs.latin_1_encode(s)[0]
_as_string = _as_unicode
def _is_int_or_long(i):
"""Check if the value is an integer.
Note there are no longs on Python 3.
"""
return isinstance(i, int)
import io
def _binary_to_string_handle(handle):
"""Treat a binary (bytes) handle like a text (unicode) handle."""
#See also http://bugs.python.org/issue5628
#and http://bugs.python.org/issue13541
#and http://bugs.python.org/issue13464 which should be fixed in Python 3.3
#return io.TextIOWrapper(io.BufferedReader(handle))
#TODO - Re-evaluate this workaround under Python 3.3
#(perhaps we will only need it on Python 3.1 and 3.2?)
class EvilHandleHack(object):
def __init__(self, handle):
self._handle = handle
def read(self, length=None):
return _as_string(self._handle.read(length))
def readline(self):
return _as_string(self._handle.readline())
def __iter__(self):
for line in self._handle:
yield _as_string(line)
def close(self):
return self._handle.close()
def seek(self, pos):
return self._handle.seek(pos)
def tell(self):
return self._handle.tell()
return EvilHandleHack(handle)
#On Python 3, can depend on OrderedDict being present:
from collections import OrderedDict
#On Python 3, this will be a unicode StringIO
from io import StringIO
#On Python 3 urllib, urllib2, and urlparse were merged:
from urllib.request import urlopen, Request, urlretrieve, urlparse
from urllib.parse import urlencode, quote
from urllib.error import HTTPError
else:
#Python 2 code
from __builtin__ import open, basestring, unicode
#Import Python3 like iterator functions:
from future_builtins import zip, map, filter
from __builtin__ import xrange as range
from __builtin__ import raw_input as input
_bytes_to_string = lambda b: b # bytes to string, i.e. do nothing
_string_to_bytes = lambda s: str(s) # str (or unicode) to bytes string
def _as_unicode(s):
"""Turn a (byte) string or a unicode string into a (byte) string."""
#Will be changed by 2to3 to "isinstance(s, str)" but doesn't matter:
if isinstance(s, unicode):
return s
return s.decode()
def _as_bytes(s):
"""Turn a (byte) string or a unicode string into a (byte) string."""
return str(s)
_as_string = _as_bytes
def _is_int_or_long(i):
"""Check if the value is an integer or long."""
return isinstance(i, (int, long))
def _binary_to_string_handle(handle):
"""Treat a binary handle like a text handle."""
return handle
try:
#Present on Python 2.7
from collections import OrderedDict
except ImportError:
try:
#Raymond Hettinger's backport available on PyPI
from ordereddict import OrderedDict
except ImportError:
#Use our bundled copy instead
from ._ordereddict import OrderedDict
# On Python 2 this will be a (bytes) string based handle.
# Note this doesn't work as it is unicode based:
# from io import StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
#Under urllib.request on Python 3:
from urllib2 import urlopen, Request
from urllib import urlretrieve
from urlparse import urlparse
#Under urllib.parse on Python 3:
from urllib import urlencode, quote
#Under urllib.error on Python 3:
from urllib2 import HTTPError
if sys.platform == "win32":
# Can't use commands.getoutput on Python 2, Unix only/broken:
# http://bugs.python.org/issue15073
# Can't use subprocess.getoutput on Python 3, Unix only/broken:
# http://bugs.python.org/issue10197
def getoutput(cmd):
import subprocess
child = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
shell=False)
stdout, stderr = child.communicate()
# Remove trailing \n to match the Unix function,
return stdout.rstrip("\n")
elif sys.version_info[0] >= 3:
# Use subprocess.getoutput on Python 3,
from subprocess import getoutput
else:
# Use commands.getoutput on Python 2,
from commands import getoutput
|
py | b40695bd945490d36300ffd7abba1d1632ffbf0a | from time import sleep
i=0
while True:
print(i)
i+=1
sleep(3)
|
py | b406970ebeeb05dfd631e77b042e3877b2e4c478 | # -*- coding: utf-8 -*-
"""Top-level package for led_tester."""
__author__ = """Thomas Anderson"""
__email__ = '[email protected]'
__version__ = '0.1.0'
|
py | b406978cdedb0a49c5e931125ea687fd92432ab7 | _base_ = [
'../_base_/models/mocov2.py',
'../_base_/datasets/imagenet30p_mocov2_b128.py',
'../_base_/schedules/sgd_coslr-200e_in1k.py',
'../_base_/default_runtime.py',
]
# runtime settings
# the max_keep_ckpts controls the max number of ckpt file in your work_dirs
# if it is 3, when CheckpointHook (in mmcv) saves the 4th ckpt
# it will remove the oldest one to keep the number of total ckpts as 3
checkpoint_config = dict(interval=10, max_keep_ckpts=3)
|
py | b40697df3ee456846b7c62bca1b9c1c95a9efc38 | #!/usr/bin/env python
"""
Usage:
jip [--loglevel <level>] [-p] <command> [<args>...]
jip [--version] [--help]
Options:
-p, --pipeline the file contains a pipeline (interpreter mode)
-h --help Show this help message
--version Show the version information
--loglevel <level> Set the JIP log level to one of error|warn|info|debug
Commands
========
run Locally run a jip script
submit submit a jip script to a remote cluster
bash Run or submit a bash command
pipe Run or submit a pipeline command
List and query jobs
===================
jobs list and update jobs from the job database
Manipulate jobs
===============
delete delete the selected jobs
archive archive the selected jobs
cancel cancel selected and running jobs
hold put selected jobs on hold
restart restart selected jobs
logs show log files of jobs
edit edit job commands for a given job
show show job options and command for jobs
Miscellaneous
=============
tools list all tools available through the search paths
profiles list all available profiles
specs create a spec file for a given pipeline
clean remove job logs
check check job status
server start the jip grid server
Documentation, bug-reports and feedback
---------------------------------------
If you discover any issues, please open a bug report in the JIP issue tracker.
Documentation: http://pyjip.rtfd.org
Source Code : https://github.com/thasso/pyjip/
Issue Tracker: https://github.com/thasso/pyjip/issues
"""
import os
import sys
import jip
import jip.options
import jip.tools
import jip.cli
import jip.cluster
import jip.configuration
import jip.templates
from jip.logger import getLogger, log_level
from jip.vendor.docopt import docopt
log = getLogger('jip.cli.jip_main')
def main():
try:
jip.configuration.install_path = os.path.abspath(
os.path.dirname(sys.argv[0])
)
except:
pass
try:
_main()
except jip.options.ParserException as err:
log.debug("parser error: %s", str(err), exc_info=True)
sys.stderr.write(str(err))
sys.exit(1)
except jip.ValidationError as va:
log.debug("validation error: %s", str(va), exc_info=True)
sys.stderr.write(str(va))
sys.stderr.write("\n")
sys.exit(1)
except jip.templates.RenderError as va:
log.debug("render error: %s", str(va), exc_info=True)
sys.stderr.write(str(va))
sys.stderr.write("\n")
sys.exit(1)
except jip.tools.ToolNotFoundException as notFound:
log.debug("Tool not found: %s", str(notFound), exc_info=True)
print >>sys.stderr, jip.cli.colorize(str(notFound), jip.cli.RED)
print >>sys.stderr, """\
Check your search paths and your jip configuration to include and
find tool definitions that are not in any default paths.
"""
sys.exit(1)
except jip.cluster.ClusterImplementationError as notFound:
log.debug("Cluster not found: %s", str(notFound), exc_info=True)
print >>sys.stderr, jip.cli.colorize(str(notFound), jip.cli.RED)
sys.exit(1)
except jip.cluster.SubmissionError as notFound:
log.debug("Submission error: %s", str(notFound), exc_info=True)
print >>sys.stderr, jip.cli.colorize(str(notFound), jip.cli.RED)
sys.exit(1)
def _main():
version = str(jip.__version__)
args = docopt(__doc__, version=version,
options_first=True, help=True)
if args['--loglevel']:
log_level(args['--loglevel'])
cmd = args['<command>']
if not cmd:
docopt(__doc__, version=version, options_first=True, argv=['--help'],
help=True)
sys.exit(1)
try:
import runpy
argv = ["jip-" + cmd] + args['<args>']
sys.argv = argv # reset options
runpy.run_module("jip.cli.jip_%s" % cmd, run_name="__main__")
except ImportError:
log.debug("Import error, trying command. Here is the exception:",
exc_info=True)
# check interpreter mode
import os
if os.path.exists(cmd):
import runpy
argv = ["jip-interpreter"] + \
([] if not args['--pipeline'] else ['--pipeline']) + \
[cmd] + args['<args>']
sys.argv = argv # reset options
runpy.run_module("jip.cli.jip_interpreter", run_name="__main__")
else:
sys.stderr.write("\nCommand %s not found\n\n" % (cmd))
docopt(__doc__, version=version, options_first=True,
argv=['--help'], help=True)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
if __name__ == "__main__":
main()
|
py | b40699a159402e02285ee3723668239c3335d148 | # -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
""" Provides utility functions to build a Fledge Support bundle.
"""
import logging
import datetime
import platform
import os
from os.path import basename
import glob
import sys
import shutil
import json
import tarfile
import fnmatch
import subprocess
from fledge.services.core.connect import *
from fledge.common import logger
from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA
from fledge.common.configuration_manager import ConfigurationManager
from fledge.common.plugin_discovery import PluginDiscovery
from fledge.common.storage_client import payload_builder
from fledge.services.core.api.service import get_service_records, get_service_installed
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_LOGGER = logger.setup(__name__, level=logging.INFO)
_NO_OF_FILES_TO_RETAIN = 3
_SYSLOG_FILE = '/var/log/syslog'
_PATH = _FLEDGE_DATA if _FLEDGE_DATA else _FLEDGE_ROOT + '/data'
if ('centos' in platform.platform()) or ('redhat' in platform.platform()):
_SYSLOG_FILE = '/var/log/messages'
class SupportBuilder:
_out_file_path = None
_interim_file_path = None
_storage = None
def __init__(self, support_dir):
try:
if not os.path.exists(support_dir):
os.makedirs(support_dir)
else:
self.check_and_delete_bundles(support_dir)
self._out_file_path = support_dir
self._interim_file_path = support_dir
self._storage = get_storage_async() # from fledge.services.core.connect
except (OSError, Exception) as ex:
_LOGGER.error("Error in initializing SupportBuilder class: %s ", str(ex))
raise RuntimeError(str(ex))
async def build(self):
try:
today = datetime.datetime.now()
file_spec = today.strftime('%y%m%d-%H-%M-%S')
tar_file_name = self._out_file_path+"/"+"support-{}.tar.gz".format(file_spec)
pyz = tarfile.open(tar_file_name, "w:gz")
try:
await self.add_fledge_version_and_schema(pyz)
self.add_syslog_fledge(pyz, file_spec)
self.add_syslog_storage(pyz, file_spec)
cf_mgr = ConfigurationManager(self._storage)
try:
south_cat = await cf_mgr.get_category_child("South")
south_categories = [sc["key"] for sc in south_cat]
for service in south_categories:
self.add_syslog_service(pyz, file_spec, service)
except:
pass
try:
north_cat = await cf_mgr.get_category_child("North")
north_categories = [nc["key"] for nc in north_cat]
for task in north_categories:
if task != "OMF_TYPES":
self.add_syslog_service(pyz, file_spec, task)
except:
pass
await self.add_table_configuration(pyz, file_spec)
await self.add_table_audit_log(pyz, file_spec)
await self.add_table_schedules(pyz, file_spec)
await self.add_table_scheduled_processes(pyz, file_spec)
await self.add_table_statistics_history(pyz, file_spec)
await self.add_table_plugin_data(pyz, file_spec)
await self.add_table_streams(pyz, file_spec)
self.add_service_registry(pyz, file_spec)
self.add_machine_resources(pyz, file_spec)
self.add_psinfo(pyz, file_spec)
self.add_script_dir_content(pyz)
self.add_package_log_dir_content(pyz)
self.add_software_list(pyz, file_spec)
finally:
pyz.close()
except Exception as ex:
_LOGGER.error("Error in creating Support .tar.gz file: %s ", str(ex))
raise RuntimeError(str(ex))
self.check_and_delete_temp_files(self._interim_file_path)
_LOGGER.info("Support bundle %s successfully created.", tar_file_name)
return tar_file_name
def check_and_delete_bundles(self, support_dir):
files = glob.glob(support_dir + "/" + "support*.tar.gz")
files.sort(key=os.path.getmtime)
if len(files) >= _NO_OF_FILES_TO_RETAIN:
for f in files[:-2]:
if os.path.isfile(f):
os.remove(os.path.join(support_dir, f))
def check_and_delete_temp_files(self, support_dir):
# Delete all non *.tar.gz files
for f in os.listdir(support_dir):
if not fnmatch.fnmatch(f, 'support*.tar.gz'):
os.remove(os.path.join(support_dir, f))
def write_to_tar(self, pyz, temp_file, data):
with open(temp_file, 'w') as outfile:
json.dump(data, outfile, indent=4)
pyz.add(temp_file, arcname=basename(temp_file))
async def add_fledge_version_and_schema(self, pyz):
temp_file = self._interim_file_path + "/" + "fledge-info"
with open('{}/VERSION'.format(_FLEDGE_ROOT)) as f:
lines = [line.rstrip() for line in f]
self.write_to_tar(pyz, temp_file, lines)
def add_syslog_fledge(self, pyz, file_spec):
# The fledge entries from the syslog file
temp_file = self._interim_file_path + "/" + "syslog-{}".format(file_spec)
try:
subprocess.call("grep -a '{}' {} > {}".format("Fledge", _SYSLOG_FILE, temp_file), shell=True)
except OSError as ex:
raise RuntimeError("Error in creating {}. Error-{}".format(temp_file, str(ex)))
pyz.add(temp_file, arcname=basename(temp_file))
def add_syslog_storage(self, pyz, file_spec):
# The contents of the syslog file that relate to the database layer (postgres)
temp_file = self._interim_file_path + "/" + "syslogStorage-{}".format(file_spec)
try:
subprocess.call("grep -a '{}' {} > {}".format("Fledge Storage", _SYSLOG_FILE, temp_file), shell=True)
except OSError as ex:
raise RuntimeError("Error in creating {}. Error-{}".format(temp_file, str(ex)))
pyz.add(temp_file, arcname=basename(temp_file))
def add_syslog_service(self, pyz, file_spec, service):
# The fledge entries from the syslog file for a service or task
# Replace space occurrences with hyphen for service or task - so that file is created
tmp_svc = service.replace(' ', '-')
temp_file = self._interim_file_path + "/" + "syslog-{}-{}".format(tmp_svc, file_spec)
try:
subprocess.call("grep -a -E '(Fledge {})\[' {} > {}".format(service, _SYSLOG_FILE, temp_file), shell=True)
pyz.add(temp_file, arcname=basename(temp_file))
except Exception as ex:
raise RuntimeError("Error in creating {}. Error-{}".format(temp_file, str(ex)))
async def add_table_configuration(self, pyz, file_spec):
# The contents of the configuration table from the storage layer
temp_file = self._interim_file_path + "/" + "configuration-{}".format(file_spec)
data = await self._storage.query_tbl("configuration")
self.write_to_tar(pyz, temp_file, data)
async def add_table_audit_log(self, pyz, file_spec):
# The contents of the audit log from the storage layer
temp_file = self._interim_file_path + "/" + "audit-{}".format(file_spec)
data = await self._storage.query_tbl("log")
self.write_to_tar(pyz, temp_file, data)
async def add_table_schedules(self, pyz, file_spec):
# The contents of the schedules table from the storage layer
temp_file = self._interim_file_path + "/" + "schedules-{}".format(file_spec)
data = await self._storage.query_tbl("schedules")
self.write_to_tar(pyz, temp_file, data)
async def add_table_scheduled_processes(self, pyz, file_spec):
temp_file = self._interim_file_path + "/" + "scheduled_processes-{}".format(file_spec)
data = await self._storage.query_tbl("scheduled_processes")
self.write_to_tar(pyz, temp_file, data)
async def add_table_statistics_history(self, pyz, file_spec):
# The contents of the statistics history from the storage layer
temp_file = self._interim_file_path + "/" + "statistics-history-{}".format(file_spec)
payload = payload_builder.PayloadBuilder() \
.LIMIT(1000) \
.ORDER_BY(['history_ts', 'DESC']) \
.payload()
data = await self._storage.query_tbl_with_payload("statistics_history", payload)
self.write_to_tar(pyz, temp_file, data)
async def add_table_plugin_data(self, pyz, file_spec):
# The contents of the plugin_data from the storage layer
temp_file = self._interim_file_path + "/" + "plugin-data-{}".format(file_spec)
payload = payload_builder.PayloadBuilder() \
.LIMIT(1000) \
.ORDER_BY(['key', 'ASC']) \
.payload()
data = await self._storage.query_tbl_with_payload("plugin_data", payload)
self.write_to_tar(pyz, temp_file, data)
async def add_table_streams(self, pyz, file_spec):
# The contents of the streams from the storage layer
temp_file = self._interim_file_path + "/" + "streams-{}".format(file_spec)
payload = payload_builder.PayloadBuilder() \
.LIMIT(1000) \
.ORDER_BY(['id', 'ASC']) \
.payload()
data = await self._storage.query_tbl_with_payload("streams", payload)
self.write_to_tar(pyz, temp_file, data)
def add_service_registry(self, pyz, file_spec):
# The contents of the service registry
temp_file = self._interim_file_path + "/" + "service_registry-{}".format(file_spec)
data = {
"about": "Service Registry",
"serviceRegistry": get_service_records()
}
self.write_to_tar(pyz, temp_file, data)
def add_machine_resources(self, pyz, file_spec):
# Details of machine resources, memory size, amount of available memory, storage size and amount of free storage
temp_file = self._interim_file_path + "/" + "machine-{}".format(file_spec)
total, used, free = shutil.disk_usage("/")
memory = subprocess.Popen('free -h', shell=True, stdout=subprocess.PIPE).stdout.readlines()[1].split()[1:]
data = {
"about": "Machine resources",
"platform": sys.platform,
"totalMemory": memory[0].decode(),
"usedMemory": memory[1].decode(),
"freeMemory": memory[2].decode(),
"totalDiskSpace_MB": int(total / (1024 * 1024)),
"usedDiskSpace_MB": int(used / (1024 * 1024)),
"freeDiskSpace_MB": int(free / (1024 * 1024)),
}
self.write_to_tar(pyz, temp_file, data)
def add_psinfo(self, pyz, file_spec):
# A PS listing of al the python applications running on the machine
temp_file = self._interim_file_path + "/" + "psinfo-{}".format(file_spec)
a = subprocess.Popen('ps -aufx | egrep "(%MEM|fledge\.)" | grep -v grep', shell=True,
stdout=subprocess.PIPE).stdout.readlines()
c = [b.decode() for b in a] # Since "a" contains return value in bytes, convert it to string
c_tasks = subprocess.Popen('ps -aufx | grep "./tasks" | grep -v grep', shell=True,
stdout=subprocess.PIPE).stdout.readlines()
c_tasks_decode = [t.decode() for t in c_tasks]
if c_tasks_decode:
c.extend(c_tasks_decode)
# Remove "/n" from the c list output
data = {
"runningProcesses": list(map(str.strip, c))
}
self.write_to_tar(pyz, temp_file, data)
def add_script_dir_content(self, pyz):
script_file_path = _PATH + '/scripts'
if os.path.exists(script_file_path):
# recursively 'true' by default and __pycache__ dir excluded
pyz.add(script_file_path, arcname='scripts', filter=self.exclude_pycache)
def add_package_log_dir_content(self, pyz):
script_package_logs_path = _PATH + '/logs'
if os.path.exists(script_package_logs_path):
# recursively 'true' by default and __pycache__ dir excluded
pyz.add(script_package_logs_path, arcname='package_logs', filter=self.exclude_pycache)
def add_software_list(self, pyz, file_spec) -> None:
data = {
"plugins": PluginDiscovery.get_plugins_installed(),
"services": get_service_installed()
}
temp_file = self._interim_file_path + "/" + "software-{}".format(file_spec)
self.write_to_tar(pyz, temp_file, data)
def exclude_pycache(self, tar_info):
return None if '__pycache__' in tar_info.name else tar_info
|
py | b40699d3cad95c5740a4e33c48371f8f556359f9 | #
# PySNMP MIB module AVAYAGEN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AVAYAGEN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:32:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, Gauge32, Counter64, IpAddress, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier, Unsigned32, ObjectIdentity, enterprises, Integer32, iso, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "Counter64", "IpAddress", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier", "Unsigned32", "ObjectIdentity", "enterprises", "Integer32", "iso", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
avaya = ModuleIdentity((1, 3, 6, 1, 4, 1, 6889))
avaya.setRevisions(('1909-12-19 10:00', '1904-01-27 09:00', '1902-08-15 09:00', '1902-07-28 09:00', '1901-08-09 17:00', '1901-06-21 11:55', '1900-10-15 10:45', '1900-10-15 13:05',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: avaya.setRevisionsDescriptions(('Rev 1.4.1 - Nick Saparoff. rename mibs to avayaMibs. rename products to avayaProducts. ', 'Rev 1.4.0 - Meir Deutsch. adds avGatewayProducts under avayaProducts. adds avGatewayMibs under avayaMibs. ', 'Rev 1.3.0 - Itai Zilbershterin. adds avayaSystemStats under lsg. ', 'Rev 1.2.0 - Itai Zilbershterin. adds avayaEISTopology under lsg. ', 'Rev 1.1.0 - Itai Zilbershterin. adds products OID to those defined. ', 'Rev 1.0.0 - Itai Zilbershterin. Fixed the mibs placement error. Avaya Mibs reside under avaya.2 and not avaya.1. The MIB branch is called avayaMibs.', 'Rev 0.9.0 - Itai Zilbershterin. The initial version of this MIB module. The following Organizational top-level groups are defined: lsg - Mibs of the LAN System Group (Concord & Israel).', "Rev 0.9.1 - Itai Zilbershterin. Dates in Revisions changed from 'yyyymmddhhmm' to 'yymmddhhmm', to support older development environments.",))
if mibBuilder.loadTexts: avaya.setLastUpdated('0401270900Z')
if mibBuilder.loadTexts: avaya.setOrganization('Avaya Inc.')
if mibBuilder.loadTexts: avaya.setContactInfo('Avaya Customer Services Postal: Avaya, Inc. 211 Mt Airy Rd. Basking Ridge, NJ 07920 USA Tel: +1 908 953 6000 WWW: http://www.avaya.com ')
if mibBuilder.loadTexts: avaya.setDescription('Avaya top-level OID tree. This MIB module deals defines the Avaya enterprise-specific tree. Development organizations within Avaya who wish to register MIBs under the Avaya enterprise OID, should: a. Contact the maintainer of this module, and get an organization OID and group OID. b. Import the definition of their Organization OID from this MIB. ')
avayaProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 1))
avayaMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2))
avGatewayProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 1, 6))
avGatewayMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 6))
lsg = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1))
avayaEISTopology = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1, 10))
avayaSystemStats = MibIdentifier((1, 3, 6, 1, 4, 1, 6889, 2, 1, 11))
mibBuilder.exportSymbols("AVAYAGEN-MIB", avayaMibs=avayaMibs, avayaEISTopology=avayaEISTopology, avGatewayMibs=avGatewayMibs, lsg=lsg, avayaProducts=avayaProducts, avayaSystemStats=avayaSystemStats, PYSNMP_MODULE_ID=avaya, avGatewayProducts=avGatewayProducts, avaya=avaya)
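# Loading sketch (illustration only, not part of the generated module): pysmi
# output like the above is normally placed on a MIB search path and resolved
# by pysnmp's MibBuilder, which supplies the `mibBuilder` name used by the
# importSymbols/exportSymbols calls above, along the lines of:
#
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.loadModules('AVAYAGEN-MIB')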
|
py | b40699fbf0cca01b55220c350f7dade474b5a501 | from typing import Any, Dict, Iterable, List
from ...models.models import Mediafile
from ...shared.patterns import Collection, FullQualifiedId
from ..base import ActionPayload
from ..default_schema import DefaultSchema
from ..generics import DeleteAction
from ..register import register_action
@register_action("mediafile.delete")
class MediafileDelete(DeleteAction):
"""
    Action to delete a mediafile.
"""
model = Mediafile()
schema = DefaultSchema(Mediafile()).get_delete_schema()
def get_updated_instances(self, payload: ActionPayload) -> Iterable[Dict[str, Any]]:
new_payload = []
for instance in payload:
new_payload.extend(
[{"id": id_} for id_ in self.get_tree_ids(instance["id"])]
)
return new_payload
def get_tree_ids(self, id_: int) -> List[int]:
tree_ids = [id_]
node = self.database.get(
FullQualifiedId(Collection("mediafile"), id_), ["child_ids"]
)
if node.get("child_ids"):
for child_id in node["child_ids"]:
tree_ids.extend(self.get_tree_ids(child_id))
return tree_ids
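# Illustration only: `get_tree_ids` walks the mediafile tree through
# `child_ids`, which the real action reads via self.database.get. The same
# recursion over an in-memory stand-in for the datastore:
#
#   tree = {1: [2, 3], 2: [4], 3: [], 4: []}
#
#   def collect(id_):
#       ids = [id_]
#       for child in tree[id_]:
#           ids.extend(collect(child))
#       return ids
#
#   assert collect(1) == [1, 2, 4, 3]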
|
py | b4069bf1db38512095236c874d67315fcb1dd780 | """Test the coverage plugin."""
import os
import sys
import unittest
import shutil
from nose.plugins import PluginTester
from nose.plugins.cover import Coverage
support = os.path.join(os.path.dirname(__file__), 'support')
try:
import coverage
# Python 3.3 may accidentally pick up our support area when running the unit
# tests. Look for the coverage attribute to make sure we've got the right
# package.
hasCoverage = hasattr(coverage, 'coverage')
except ImportError:
hasCoverage = False
class TestCoveragePlugin(PluginTester, unittest.TestCase):
activate = "--with-coverage"
args = ['-v', '--cover-package=blah', '--cover-html', '--cover-min-percentage', '25']
plugins = [Coverage()]
suitepath = os.path.join(support, 'coverage')
def setUp(self):
if not hasCoverage:
raise unittest.SkipTest('coverage not available; skipping')
self.cover_file = os.path.join(os.getcwd(), '.coverage')
self.cover_html_dir = os.path.join(os.getcwd(), 'cover')
if os.path.exists(self.cover_file):
os.unlink(self.cover_file)
if os.path.exists(self.cover_html_dir):
shutil.rmtree(self.cover_html_dir)
super(TestCoveragePlugin, self).setUp()
def runTest(self):
self.assertTrue("blah 4 3 25% 1" in self.output)
self.assertTrue("Ran 1 test in" in self.output)
# Assert coverage html report exists
self.assertTrue(os.path.exists(os.path.join(self.cover_html_dir,
'index.html')))
# Assert coverage data is saved
self.assertTrue(os.path.exists(self.cover_file))
class TestCoverageMinPercentagePlugin(PluginTester, unittest.TestCase):
activate = "--with-coverage"
args = ['-v', '--cover-package=blah', '--cover-min-percentage', '100']
plugins = [Coverage()]
suitepath = os.path.join(support, 'coverage')
def setUp(self):
if not hasCoverage:
raise unittest.SkipTest('coverage not available; skipping')
self.cover_file = os.path.join(os.getcwd(), '.coverage')
self.cover_html_dir = os.path.join(os.getcwd(), 'cover')
if os.path.exists(self.cover_file):
os.unlink(self.cover_file)
if os.path.exists(self.cover_html_dir):
shutil.rmtree(self.cover_html_dir)
self.assertRaises(SystemExit,
super(TestCoverageMinPercentagePlugin, self).setUp)
def runTest(self):
pass
class TestCoverageMinPercentageSinglePackagePlugin(
PluginTester, unittest.TestCase):
activate = "--with-coverage"
args = ['-v', '--cover-package=blah', '--cover-html',
'--cover-min-percentage', '100']
plugins = [Coverage()]
suitepath = os.path.join(support, 'coverage')
def setUp(self):
if not hasCoverage:
raise unittest.SkipTest('coverage not available; skipping')
self.cover_file = os.path.join(os.getcwd(), '.coverage')
self.cover_html_dir = os.path.join(os.getcwd(), 'cover')
if os.path.exists(self.cover_file):
os.unlink(self.cover_file)
if os.path.exists(self.cover_html_dir):
shutil.rmtree(self.cover_html_dir)
self.assertRaises(SystemExit,
super(TestCoverageMinPercentageSinglePackagePlugin,
self).setUp)
def runTest(self):
pass
class TestCoverageMinPercentageSinglePackageWithBranchesPlugin(
PluginTester, unittest.TestCase):
activate = "--with-coverage"
args = ['-v', '--cover-package=blah', '--cover-branches',
'--cover-html', '--cover-min-percentage', '100']
plugins = [Coverage()]
suitepath = os.path.join(support, 'coverage')
def setUp(self):
if not hasCoverage:
raise unittest.SkipTest('coverage not available; skipping')
self.cover_file = os.path.join(os.getcwd(), '.coverage')
self.cover_html_dir = os.path.join(os.getcwd(), 'cover')
if os.path.exists(self.cover_file):
os.unlink(self.cover_file)
if os.path.exists(self.cover_html_dir):
shutil.rmtree(self.cover_html_dir)
self.assertRaises(
SystemExit,
super(TestCoverageMinPercentageSinglePackageWithBranchesPlugin,
self).setUp)
def runTest(self):
pass
class TestCoverageMinPercentageTOTALPlugin(PluginTester, unittest.TestCase):
activate = "--with-coverage"
args = ['-v', '--cover-package=blah', '--cover-package=moo',
'--cover-min-percentage', '100']
plugins = [Coverage()]
suitepath = os.path.join(support, 'coverage2')
def setUp(self):
if not hasCoverage:
raise unittest.SkipTest('coverage not available; skipping')
self.cover_file = os.path.join(os.getcwd(), '.coverage')
self.cover_html_dir = os.path.join(os.getcwd(), 'cover')
if os.path.exists(self.cover_file):
os.unlink(self.cover_file)
if os.path.exists(self.cover_html_dir):
shutil.rmtree(self.cover_html_dir)
self.assertRaises(SystemExit,
super(TestCoverageMinPercentageTOTALPlugin, self).setUp)
def runTest(self):
pass
class TestCoverageMinPercentageWithBranchesTOTALPlugin(
PluginTester, unittest.TestCase):
activate = "--with-coverage"
args = ['-v', '--cover-package=blah', '--cover-package=moo',
'--cover-branches', '--cover-min-percentage', '100']
plugins = [Coverage()]
suitepath = os.path.join(support, 'coverage2')
def setUp(self):
if not hasCoverage:
raise unittest.SkipTest('coverage not available; skipping')
self.cover_file = os.path.join(os.getcwd(), '.coverage')
self.cover_html_dir = os.path.join(os.getcwd(), 'cover')
if os.path.exists(self.cover_file):
os.unlink(self.cover_file)
if os.path.exists(self.cover_html_dir):
shutil.rmtree(self.cover_html_dir)
self.assertRaises(
SystemExit,
super(TestCoverageMinPercentageWithBranchesTOTALPlugin, self).setUp)
def runTest(self):
pass
if __name__ == '__main__':
unittest.main()
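# For reference, a sketch of the command line that the first test case drives
# through PluginTester (the fixture path is a placeholder for this repo's
# support/coverage directory):
#
#   nosetests -v --with-coverage --cover-package=blah --cover-html \
#       --cover-min-percentage 25 <path-to>/support/coverage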
|
py | b4069cae06b81fd0d4401ac235b9b8db454f84bb | '''
Script installation resources. Modified from setuptools/easy_install.py.
'''
import os
import re
import sys
import pkg_resources
SCRIPT_TEXT = '''# PYG-ENTRY-SCRIPT: {spec!r},{group!r},{name!r}
__requires__ = {spec!r}
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point({spec!r}, {group!r}, {name!r})()
)'''
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
def nt_quote_arg(arg):
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb * 2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
def get_script_header(script_text, executable=sys.executable):
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
first = (script_text + '\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options:
options = ' ' + options
executable = nt_quote_arg(executable)
hdr = "#!{0}{1}\n".format(executable, options)
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x' + options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
hdr = "#!{0}{1}\n".format(executable, options)
return hdr
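# A minimal illustration of the two helpers above (the Windows-style path is
# hypothetical; in real use the interpreter path comes from sys.executable):
#
#   nt_quote_arg('C:\\Program Files\\Python\\python.exe')
#       -> '"C:\\Program Files\\Python\\python.exe"'
#   get_script_header('', executable='/usr/bin/python3')
#       -> '#!/usr/bin/python3\n'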
def script_args(dist):
spec = dist.as_req
header = get_script_header("", sys.executable)
for group in 'console_scripts', 'gui_scripts':
for name, ep in dist.entry_points_map(group).items():
script_text = SCRIPT_TEXT.format(**locals())
if sys.platform == 'win32':
# On Windows/wininst, add a .py extension and an .exe launcher
if group == 'gui_scripts':
ext, launcher = '-script.pyw', 'gui.exe'
new_header = re.sub('(?i)python.exe', 'pythonw.exe', header)
else:
ext, launcher = '-script.py', 'cli.exe'
new_header = re.sub('(?i)pythonw.exe', 'python.exe', header)
if os.path.exists(new_header[2:-1]):
hdr = new_header
else:
hdr = header
yield (name + ext, hdr + script_text, 't')
yield (
name + '.exe', pkg_resources.resource_string('setuptools', launcher),
'b' # write in binary mode
)
else:
# On other platforms, we assume the right thing to do is to
# just write the stub with no extension.
                yield (name, header + script_text, '')
|
py | b4069d56f5712e896abf2328d0df418fcadf1225 | """Configuration file parsing and utilities."""
import copy
import itertools
import os
from collections import Set, namedtuple
from re import compile as re
try:  # Python 3.x
    from configparser import RawConfigParser
except ImportError:  # Python 2.x
    from ConfigParser import RawConfigParser
from .utils import __version__, log
from .violations import ErrorRegistry, conventions
def check_initialized(method):
"""Check that the configuration object was initialized."""
def _decorator(self, *args, **kwargs):
if self._arguments is None or self._options is None:
raise RuntimeError('using an uninitialized configuration')
return method(self, *args, **kwargs)
return _decorator
class ConfigurationParser(object):
"""Responsible for parsing configuration from files and CLI.
There are 2 types of configurations: Run configurations and Check
configurations.
Run Configurations:
------------------
Responsible for deciding things that are related to the user interface and
configuration discovery, e.g. verbosity, debug options, etc.
All run configurations default to `False` or `None` and are decided only
by CLI.
Check Configurations:
--------------------
Configurations that are related to which files and errors will be checked.
These are configurable in 2 ways: using the CLI, and using configuration
files.
Configuration files are nested within the file system, meaning that the
closer a configuration file is to a checked file, the more relevant it will
be. For instance, imagine this directory structure:
A
+-- tox.ini: sets `select=D100`
+-- B
+-- foo.py
+-- tox.ini: sets `add-ignore=D100`
Then `foo.py` will not be checked for `D100`.
The configuration build algorithm is described in `self._get_config`.
Note: If any of `BASE_ERROR_SELECTION_OPTIONS` was selected in the CLI, all
configuration files will be ignored and each file will be checked for
the error codes supplied in the CLI.
"""
CONFIG_FILE_OPTIONS = ('convention', 'select', 'ignore', 'add-select',
'add-ignore', 'match', 'match-dir',
'ignore-decorators')
BASE_ERROR_SELECTION_OPTIONS = ('ignore', 'select', 'convention')
DEFAULT_MATCH_RE = '(?!test_).*\.py'
DEFAULT_MATCH_DIR_RE = '[^\.].*'
DEFAULT_IGNORE_DECORATORS_RE = ''
DEFAULT_CONVENTION = conventions.pep257
PROJECT_CONFIG_FILES = (
'setup.cfg',
'tox.ini',
'.pydocstyle',
'.pydocstyle.ini',
'.pydocstylerc',
'.pydocstylerc.ini',
# The following is deprecated, but remains for backwards compatibility.
'.pep257',
)
POSSIBLE_SECTION_NAMES = ('pydocstyle', 'pep257')
def __init__(self):
"""Create a configuration parser."""
self._cache = {}
self._override_by_cli = None
self._options = self._arguments = self._run_conf = None
self._parser = self._create_option_parser()
# ---------------------------- Public Methods -----------------------------
def get_default_run_configuration(self):
"""Return a `RunConfiguration` object set with default values."""
options, _ = self._parse_args([])
return self._create_run_config(options)
def parse(self):
"""Parse the configuration.
If one of `BASE_ERROR_SELECTION_OPTIONS` was selected, overrides all
error codes to check and disregards any error code related
configurations from the configuration files.
"""
self._options, self._arguments = self._parse_args()
self._arguments = self._arguments or ['.']
if not self._validate_options(self._options):
raise IllegalConfiguration()
self._run_conf = self._create_run_config(self._options)
config = self._create_check_config(self._options, use_defaults=False)
self._override_by_cli = config
@check_initialized
def get_user_run_configuration(self):
"""Return the run configuration for the script."""
return self._run_conf
@check_initialized
def get_files_to_check(self):
"""Generate files and error codes to check on each one.
        Walk dir trees under `self._arguments` and yield the filenames
that `match` under each directory that `match_dir`.
The method locates the configuration for each file name and yields a
tuple of (filename, [error_codes]).
With every discovery of a new configuration file `IllegalConfiguration`
might be raised.
"""
def _get_matches(config):
"""Return the `match` and `match_dir` functions for `config`."""
match_func = re(config.match + '$').match
match_dir_func = re(config.match_dir + '$').match
return match_func, match_dir_func
def _get_ignore_decorators(config):
"""Return the `ignore_decorators` as None or regex."""
if config.ignore_decorators: # not None and not ''
ignore_decorators = re(config.ignore_decorators)
else:
ignore_decorators = None
return ignore_decorators
for name in self._arguments:
if os.path.isdir(name):
for root, dirs, filenames in os.walk(name):
config = self._get_config(root)
match, match_dir = _get_matches(config)
ignore_decorators = _get_ignore_decorators(config)
# Skip any dirs that do not match match_dir
dirs[:] = [dir for dir in dirs if match_dir(dir)]
for filename in filenames:
if match(filename):
full_path = os.path.join(root, filename)
yield (full_path, list(config.checked_codes),
ignore_decorators)
else:
config = self._get_config(name)
match, _ = _get_matches(config)
ignore_decorators = _get_ignore_decorators(config)
if match(name):
yield (name, list(config.checked_codes), ignore_decorators)
# --------------------------- Private Methods -----------------------------
def _get_config_by_discovery(self, node):
"""Get a configuration for checking `node` by config discovery.
Config discovery happens when no explicit config file is specified. The
file system is searched for config files starting from the directory
containing the file being checked, and up until the root directory of
the project.
See `_get_config` for further details.
"""
path = self._get_node_dir(node)
if path in self._cache:
return self._cache[path]
config_file = self._get_config_file_in_folder(path)
if config_file is None:
parent_dir, tail = os.path.split(path)
if tail:
# No configuration file, simply take the parent's.
config = self._get_config(parent_dir)
else:
# There's no configuration file and no parent directory.
# Use the default configuration or the one given in the CLI.
config = self._create_check_config(self._options)
else:
# There's a config file! Read it and merge if necessary.
options, inherit = self._read_configuration_file(config_file)
parent_dir, tail = os.path.split(path)
if tail and inherit:
# There is a parent dir and we should try to merge.
parent_config = self._get_config(parent_dir)
config = self._merge_configuration(parent_config, options)
else:
# No need to merge or parent dir does not exist.
config = self._create_check_config(options)
return config
def _get_config(self, node):
"""Get and cache the run configuration for `node`.
If no configuration exists (not local and not for the parent node),
returns and caches a default configuration.
The algorithm:
-------------
* If the current directory's configuration exists in
`self._cache` - return it.
* If a configuration file does not exist in this directory:
* If the directory is not a root directory:
* Cache its configuration as this directory's and return it.
* Else:
* Cache a default configuration and return it.
* Else:
* Read the configuration file.
* If a parent directory exists AND the configuration file
allows inheritance:
* Read the parent configuration by calling this function with the
parent directory as `node`.
* Merge the parent configuration with the current one and
cache it.
* If the user has specified one of `BASE_ERROR_SELECTION_OPTIONS` in
the CLI - return the CLI configuration with the configuration match
clauses
* Set the `--add-select` and `--add-ignore` CLI configurations.
"""
if self._run_conf.config is None:
log.debug('No config file specified, discovering.')
config = self._get_config_by_discovery(node)
else:
log.debug('Using config file %r', self._run_conf.config)
if not os.path.exists(self._run_conf.config):
raise IllegalConfiguration('Configuration file {!r} specified '
'via --config was not found.'
.format(self._run_conf.config))
if None in self._cache:
return self._cache[None]
options, _ = self._read_configuration_file(self._run_conf.config)
config = self._create_check_config(options)
# Make the CLI always win
final_config = {}
for attr in CheckConfiguration._fields:
cli_val = getattr(self._override_by_cli, attr)
conf_val = getattr(config, attr)
final_config[attr] = cli_val if cli_val is not None else conf_val
config = CheckConfiguration(**final_config)
self._set_add_options(config.checked_codes, self._options)
# Handle caching
if self._run_conf.config is not None:
self._cache[None] = config
else:
self._cache[self._get_node_dir(node)] = config
return config
@staticmethod
def _get_node_dir(node):
"""Return the absolute path of the directory of a filesystem node."""
path = os.path.abspath(node)
return path if os.path.isdir(path) else os.path.dirname(path)
def _read_configuration_file(self, path):
"""Try to read and parse `path` as a configuration file.
If the configurations were illegal (checked with
`self._validate_options`), raises `IllegalConfiguration`.
Returns (options, should_inherit).
"""
parser = RawConfigParser()
options = None
should_inherit = True
if parser.read(path) and self._get_section_name(parser):
option_list = dict([(o.dest, o.type or o.action)
for o in self._parser.option_list])
# First, read the default values
new_options, _ = self._parse_args([])
# Second, parse the configuration
section_name = self._get_section_name(parser)
for opt in parser.options(section_name):
if opt == 'inherit':
should_inherit = parser.getboolean(section_name, opt)
continue
if opt.replace('_', '-') not in self.CONFIG_FILE_OPTIONS:
log.warning("Unknown option '{}' ignored".format(opt))
continue
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = parser.getint(section_name, opt)
elif opt_type == 'string':
value = parser.get(section_name, opt)
else:
assert opt_type in ('store_true', 'store_false')
value = parser.getboolean(section_name, opt)
setattr(new_options, normalized_opt, value)
# Third, fix the set-options
options = self._fix_set_options(new_options)
if options is not None:
if not self._validate_options(options):
raise IllegalConfiguration('in file: {}'.format(path))
return options, should_inherit
def _merge_configuration(self, parent_config, child_options):
"""Merge parent config into the child options.
        The merge process requires an `options` object for the child in
order to distinguish between mutually exclusive codes, add-select and
add-ignore error codes.
"""
# Copy the parent error codes so we won't override them
error_codes = copy.deepcopy(parent_config.checked_codes)
if self._has_exclusive_option(child_options):
error_codes = self._get_exclusive_error_codes(child_options)
self._set_add_options(error_codes, child_options)
kwargs = dict(checked_codes=error_codes)
for key in ('match', 'match_dir', 'ignore_decorators'):
kwargs[key] = \
getattr(child_options, key) or getattr(parent_config, key)
return CheckConfiguration(**kwargs)
def _parse_args(self, args=None, values=None):
"""Parse the options using `self._parser` and reformat the options."""
options, arguments = self._parser.parse_args(args, values)
return self._fix_set_options(options), arguments
@staticmethod
def _create_run_config(options):
"""Create a `RunConfiguration` object from `options`."""
values = dict([(opt, getattr(options, opt)) for opt in
RunConfiguration._fields])
return RunConfiguration(**values)
@classmethod
def _create_check_config(cls, options, use_defaults=True):
"""Create a `CheckConfiguration` object from `options`.
If `use_defaults`, any of the match options that are `None` will
be replaced with their default value and the default convention will be
set for the checked codes.
"""
checked_codes = None
if cls._has_exclusive_option(options) or use_defaults:
checked_codes = cls._get_checked_errors(options)
kwargs = dict(checked_codes=checked_codes)
for key in ('match', 'match_dir', 'ignore_decorators'):
kwargs[key] = getattr(cls, 'DEFAULT_{0}_RE'.format(key.upper())) \
if getattr(options, key) is None and use_defaults \
else getattr(options, key)
return CheckConfiguration(**kwargs)
@classmethod
def _get_section_name(cls, parser):
"""Parse options from relevant section."""
for section_name in cls.POSSIBLE_SECTION_NAMES:
if parser.has_section(section_name):
return section_name
return None
@classmethod
def _get_config_file_in_folder(cls, path):
"""Look for a configuration file in `path`.
If exists return its full path, otherwise None.
"""
if os.path.isfile(path):
path = os.path.dirname(path)
for fn in cls.PROJECT_CONFIG_FILES:
config = RawConfigParser()
full_path = os.path.join(path, fn)
if config.read(full_path) and cls._get_section_name(config):
return full_path
@classmethod
def _get_exclusive_error_codes(cls, options):
"""Extract the error codes from the selected exclusive option."""
codes = set(ErrorRegistry.get_error_codes())
checked_codes = None
if options.ignore is not None:
ignored = cls._expand_error_codes(options.ignore)
checked_codes = codes - ignored
elif options.select is not None:
checked_codes = cls._expand_error_codes(options.select)
elif options.convention is not None:
checked_codes = getattr(conventions, options.convention)
# To not override the conventions nor the options - copy them.
return copy.deepcopy(checked_codes)
@classmethod
def _set_add_options(cls, checked_codes, options):
"""Set `checked_codes` by the `add_ignore` or `add_select` options."""
checked_codes |= cls._expand_error_codes(options.add_select)
checked_codes -= cls._expand_error_codes(options.add_ignore)
@staticmethod
def _expand_error_codes(code_parts):
"""Return an expanded set of error codes to ignore."""
codes = set(ErrorRegistry.get_error_codes())
expanded_codes = set()
try:
for part in code_parts:
if len(part) < 4:
for code in codes:
if code.startswith(part):
expanded_codes.add(code)
else:
expanded_codes.add(part)
except TypeError as e:
raise IllegalConfiguration(e)
return expanded_codes
@classmethod
def _get_checked_errors(cls, options):
"""Extract the codes needed to be checked from `options`."""
checked_codes = cls._get_exclusive_error_codes(options)
if checked_codes is None:
checked_codes = cls.DEFAULT_CONVENTION
cls._set_add_options(checked_codes, options)
return checked_codes
@classmethod
def _validate_options(cls, options):
"""Validate the mutually exclusive options.
Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS`
was selected.
"""
for opt1, opt2 in \
itertools.permutations(cls.BASE_ERROR_SELECTION_OPTIONS, 2):
if getattr(options, opt1) and getattr(options, opt2):
log.error('Cannot pass both {} and {}. They are '
'mutually exclusive.'.format(opt1, opt2))
return False
if options.convention and options.convention not in conventions:
log.error("Illegal convention '{}'. Possible conventions: {}"
.format(options.convention,
', '.join(conventions.keys())))
return False
return True
@classmethod
def _has_exclusive_option(cls, options):
"""Return `True` iff one or more exclusive options were selected."""
return any([getattr(options, opt) is not None for opt in
cls.BASE_ERROR_SELECTION_OPTIONS])
@staticmethod
def _fix_set_options(options):
"""Alter the set options from None/strings to sets in place."""
optional_set_options = ('ignore', 'select')
mandatory_set_options = ('add_ignore', 'add_select')
def _get_set(value_str):
"""Split `value_str` by the delimiter `,` and return a set.
Removes any occurrences of '' in the set.
"""
return set(value_str.split(',')) - {''}
for opt in optional_set_options:
value = getattr(options, opt)
if value is not None:
setattr(options, opt, _get_set(value))
for opt in mandatory_set_options:
value = getattr(options, opt)
if value is None:
value = ''
if not isinstance(value, Set):
value = _get_set(value)
setattr(options, opt, value)
return options
@classmethod
def _create_option_parser(cls):
"""Return an option parser to parse the command line arguments."""
from optparse import OptionParser
parser = OptionParser(
version=__version__,
usage='Usage: pydocstyle [options] [<file|dir>...]')
option = parser.add_option
# Run configuration options
option('-e', '--explain', action='store_true', default=False,
help='show explanation of each error')
option('-s', '--source', action='store_true', default=False,
help='show source for each error')
option('-d', '--debug', action='store_true', default=False,
help='print debug information')
option('-v', '--verbose', action='store_true', default=False,
help='print status information')
option('--count', action='store_true', default=False,
help='print total number of errors to stdout')
option('--config', metavar='<path>', default=None,
help='use given config file and disable config discovery')
# Error check options
option('--select', metavar='<codes>', default=None,
help='choose the basic list of checked errors by '
'specifying which errors to check for (with a list of '
'comma-separated error codes or prefixes). '
'for example: --select=D101,D2')
option('--ignore', metavar='<codes>', default=None,
help='choose the basic list of checked errors by '
'specifying which errors to ignore (with a list of '
'comma-separated error codes or prefixes). '
'for example: --ignore=D101,D2')
option('--convention', metavar='<name>', default=None,
help='choose the basic list of checked errors by specifying an '
'existing convention. Possible conventions: {}'
.format(', '.join(conventions)))
option('--add-select', metavar='<codes>', default=None,
help='amend the list of errors to check for by specifying '
'more error codes to check.')
option('--add-ignore', metavar='<codes>', default=None,
help='amend the list of errors to check for by specifying '
'more error codes to ignore.')
# Match clauses
option('--match', metavar='<pattern>', default=None,
help=("check only files that exactly match <pattern> regular "
"expression; default is --match='{}' which matches "
"files that don't start with 'test_' but end with "
"'.py'").format(cls.DEFAULT_MATCH_RE))
option('--match-dir', metavar='<pattern>', default=None,
help=("search only dirs that exactly match <pattern> regular "
"expression; default is --match-dir='{}', which "
"matches all dirs that don't start with "
"a dot").format(cls.DEFAULT_MATCH_DIR_RE))
# Decorators
option('--ignore-decorators', metavar='<decorators>', default=None,
help=("ignore any functions or methods that are decorated "
"by a function with a name fitting the <decorators> "
"regular expression; default is --ignore-decorators='{0}'"
" which does not ignore any decorated functions."
.format(cls.DEFAULT_IGNORE_DECORATORS_RE)))
return parser
# Check configuration - used by the ConfigurationParser class.
CheckConfiguration = namedtuple('CheckConfiguration',
('checked_codes', 'match', 'match_dir',
'ignore_decorators'))
class IllegalConfiguration(Exception):
"""An exception for illegal configurations."""
pass
# General configurations for pydocstyle run.
RunConfiguration = namedtuple('RunConfiguration',
('explain', 'source', 'debug',
'verbose', 'count', 'config'))
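# A minimal driver sketch (illustration only; the real pydocstyle entry point
# adds checking and error reporting on top of this), run from a project dir:
#
#   if __name__ == '__main__':
#       config_parser = ConfigurationParser()
#       config_parser.parse()  # CLI arguments plus any discovered config files
#       for filename, checked_codes, ignore_decorators in \
#               config_parser.get_files_to_check():
#           print(filename, sorted(checked_codes))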
|
py | b4069df5b7750d8d88b6155047b24007c1036161 | import random
import copy
class MyPlayer:
    '''This player chooses moves with minimax and alpha-beta pruning'''
def __init__(self, my_color, opponent_color):
self.name = 'krivast1'
self.my_color = my_color
self.opponent_color = opponent_color
self.size = 8
self.tokens_on_board = 4
self.moves = [(-1,-1), (+1,+1), (-1,+1), (+1,-1), (+1,0), (0,+1), (-1,0), (0,-1)]
self.eval_matrix = [
[20,0 ,10,10,10,10,0 ,20],
[0 ,0 ,2 ,2 ,2 ,2 ,0 ,0 ],
[10,2 ,10,8 ,8 ,10,2 ,10],
[10,2 ,8 ,5 ,5 ,8 ,2 ,10],
[10,2 ,8 ,5 ,5 ,8 ,2 ,10],
[10,2 ,10,8 ,8 ,10,2 ,10],
[0 ,0 ,2 ,2 ,2 ,2 ,0 ,0 ],
[20,0 ,10,10,10,10,0 ,20]]
def move(self, board):
max_token_flips = self.size**2
my_tokens, enemy_tokens, free_tokens = self.get_coords(board, self.my_color)
my_possible_moves = self.get_possible_moves(my_tokens, enemy_tokens, free_tokens)
evaluated_moves = []
for coords in my_possible_moves:
new_board = self.swap_stones(board, coords, self.my_color, self.opponent_color)
max_value = self.minimax(
new_board,
self.opponent_color,
self.my_color,
1,
-max_token_flips,
max_token_flips,
False)
value = max_value
evaluated_moves.append(value)
self.tokens_on_board += 2
if my_possible_moves:
return my_possible_moves[evaluated_moves.index(max(evaluated_moves))]
else:
return None
def evaluate(self, board, own_color, my_possible_moves, opponent_possible_moves, my_tokens, enemy_tokens):
board_size = self.size**2
if self.tokens_on_board < 3*board_size/4:
evaluation = len(my_possible_moves) - len(opponent_possible_moves)
else:
evaluation = len(my_tokens) - len(enemy_tokens)
return evaluation
def minimax(self, board, own_color, opponent_color, depth, alpha, beta, maximize):
my_tokens, enemy_tokens, free_tokens = self.get_coords(board, own_color)
my_possible_moves = self.get_possible_moves(my_tokens, enemy_tokens, free_tokens)
if depth == 0:
opponent_possible_moves = self.get_possible_moves(enemy_tokens, my_tokens, free_tokens)
evaluation = self.evaluate(
board,
own_color,
my_possible_moves,
opponent_possible_moves,
my_tokens,
enemy_tokens)
return evaluation
max_token_flips = self.size**2
if maximize:
max_value = -max_token_flips
for coords in my_possible_moves:
new_board = self.swap_stones(board, coords, own_color, opponent_color)
max_tokens = self.minimax(new_board, opponent_color, own_color, depth - 1, alpha, beta, False)
max_value = max(max_value, max_tokens)
alpha = max(alpha, max_tokens)
if beta <= alpha:
break
return max_value
else:
min_value = max_token_flips
for coords in my_possible_moves:
new_board = self.swap_stones(board, coords, own_color, opponent_color)
min_tokens = self.minimax(new_board, opponent_color, own_color, depth - 1, alpha, beta, True)
min_value = min(min_value, min_tokens)
beta = min(beta, min_tokens)
if beta <= alpha:
break
return min_value
def swap_stones(self, board, coords, own_color, opponent_color):
new_board = copy.deepcopy(board)
new_board[coords[0]][coords[1]] = own_color
for move in self.moves:
next_row = coords[0] + move[0]
next_col = coords[1] + move[1]
row_border = next_row < self.size and next_row >= 0
col_border = next_col < self.size and next_col >= 0
if not row_border or not col_border: continue
while new_board[next_row][next_col] == opponent_color:
next_row += move[0]
next_col += move[1]
row_border = next_row < self.size and next_row >= 0
col_border = next_col < self.size and next_col >= 0
if not row_border or not col_border: break
if not row_border or not col_border: continue
if new_board[next_row][next_col] == own_color:
while next_row != coords[0] or next_col != coords[1]:
new_board[next_row][next_col] = own_color
next_row -= move[0]
next_col -= move[1]
return new_board
# Testing function
def prt(self, board):
for i in board:
line = ""
for j in i:
if j == -1:
j = 'a'
line += " " + str(j)
print(line)
print("\n")
def get_coords(self, board, color):
my_tokens = []
enemy_tokens = []
free_tokens = []
for row in range(self.size):
for col in range(self.size):
if board[row][col] == -1:
free_tokens.append((row,col))
elif board[row][col] == color:
my_tokens.append((row,col))
else:
enemy_tokens.append((row,col))
return my_tokens, enemy_tokens, free_tokens
def get_possible_moves(self, my_tokens, enemy_tokens, free_tokens):
possible_moves = []
for my_token in my_tokens:
for i in range(len(self.moves)):
position = tuple(x + y for x, y in zip(my_token, self.moves[i]))
if position in enemy_tokens:
while position in enemy_tokens:
position = tuple(x + y for x, y in zip(position, self.moves[i]))
if position in free_tokens:
possible_moves.append(position)
return possible_moves
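# A minimal smoke test, assuming the game framework encodes players as the
# integers stored in the board cells (0 and 1 here) and empty squares as -1,
# which is how get_coords above reads the board:
if __name__ == '__main__':
    board = [[-1] * 8 for _ in range(8)]
    board[3][3] = board[4][4] = 0
    board[3][4] = board[4][3] = 1
    player = MyPlayer(my_color=0, opponent_color=1)
    print(player.move(board))  # prints one of the four legal opening replies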
|
py | b4069ebb1361269d39d4a310b18a40983e37c5c7 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AnalyzedTokenInfo(msrest.serialization.Model):
"""Information about a token returned by an analyzer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar token: Required. The token returned by the analyzer.
:vartype token: str
:ivar start_offset: Required. The index of the first character of the token in the input text.
:vartype start_offset: int
:ivar end_offset: Required. The index of the last character of the token in the input text.
:vartype end_offset: int
:ivar position: Required. The position of the token in the input text relative to other tokens.
The first token in the input text has position 0, the next has position 1, and so on. Depending
on the analyzer used, some tokens might have the same position, for example if they are
synonyms of each other.
:vartype position: int
"""
_validation = {
'token': {'required': True, 'readonly': True},
'start_offset': {'required': True, 'readonly': True},
'end_offset': {'required': True, 'readonly': True},
'position': {'required': True, 'readonly': True},
}
_attribute_map = {
'token': {'key': 'token', 'type': 'str'},
'start_offset': {'key': 'startOffset', 'type': 'int'},
'end_offset': {'key': 'endOffset', 'type': 'int'},
'position': {'key': 'position', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AnalyzedTokenInfo, self).__init__(**kwargs)
self.token = None
self.start_offset = None
self.end_offset = None
self.position = None
class AnalyzeRequest(msrest.serialization.Model):
"""Specifies some text and analysis components used to break that text into tokens.
All required parameters must be populated in order to send to Azure.
:ivar text: Required. The text to break into tokens.
:vartype text: str
:ivar analyzer: The name of the analyzer to use to break the given text. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
"zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
"da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
"et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
"de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
"hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
"id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
"kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
"ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
"pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
"sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:ivar tokenizer: The name of the tokenizer to use to break the given text. Possible values
include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase",
"microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram",
"path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
:vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:ivar normalizer: The name of the normalizer to use to normalize the given text. Possible
values include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
:vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
:ivar token_filters: An optional list of token filters to use when breaking the given text.
:vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:ivar char_filters: An optional list of character filters to use when breaking the given text.
:vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
'text': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'analyzer': {'key': 'analyzer', 'type': 'str'},
'tokenizer': {'key': 'tokenizer', 'type': 'str'},
'normalizer': {'key': 'normalizer', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword text: Required. The text to break into tokens.
:paramtype text: str
:keyword analyzer: The name of the analyzer to use to break the given text. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
"zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
"da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
"et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
"de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
"hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
"id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
"kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
"ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
"pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
"sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values
include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase",
"microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram",
"path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
:paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:keyword normalizer: The name of the normalizer to use to normalize the given text. Possible
values include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
:paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
:keyword token_filters: An optional list of token filters to use when breaking the given text.
:paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:keyword char_filters: An optional list of character filters to use when breaking the given
text.
:paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
super(AnalyzeRequest, self).__init__(**kwargs)
self.text = kwargs['text']
self.analyzer = kwargs.get('analyzer', None)
self.tokenizer = kwargs.get('tokenizer', None)
self.normalizer = kwargs.get('normalizer', None)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
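# Illustration only (not part of the generated model): the caller passes the
# documented fields as keyword arguments, e.g.
#
#   request = AnalyzeRequest(text='The quick brown fox', analyzer='en.lucene')
#   # request.text == 'The quick brown fox'; unset fields default to None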
class AnalyzeResult(msrest.serialization.Model):
"""The result of testing an analyzer on text.
All required parameters must be populated in order to send to Azure.
:ivar tokens: Required. The list of tokens returned by the analyzer specified in the request.
:vartype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
"""
_validation = {
'tokens': {'required': True},
}
_attribute_map = {
'tokens': {'key': 'tokens', 'type': '[AnalyzedTokenInfo]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword tokens: Required. The list of tokens returned by the analyzer specified in the
request.
:paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
"""
super(AnalyzeResult, self).__init__(**kwargs)
self.tokens = kwargs['tokens']
class TokenFilter(msrest.serialization.Model):
"""Base type for token filters.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AsciiFoldingTokenFilter, CjkBigramTokenFilter, CommonGramTokenFilter, DictionaryDecompounderTokenFilter, EdgeNGramTokenFilter, EdgeNGramTokenFilterV2, ElisionTokenFilter, KeepTokenFilter, KeywordMarkerTokenFilter, LengthTokenFilter, LimitTokenFilter, NGramTokenFilter, NGramTokenFilterV2, PatternCaptureTokenFilter, PatternReplaceTokenFilter, PhoneticTokenFilter, ShingleTokenFilter, SnowballTokenFilter, StemmerOverrideTokenFilter, StemmerTokenFilter, StopwordsTokenFilter, SynonymTokenFilter, TruncateTokenFilter, UniqueTokenFilter, WordDelimiterTokenFilter.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.AsciiFoldingTokenFilter': 'AsciiFoldingTokenFilter', '#Microsoft.Azure.Search.CjkBigramTokenFilter': 'CjkBigramTokenFilter', '#Microsoft.Azure.Search.CommonGramTokenFilter': 'CommonGramTokenFilter', '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter': 'DictionaryDecompounderTokenFilter', '#Microsoft.Azure.Search.EdgeNGramTokenFilter': 'EdgeNGramTokenFilter', '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2': 'EdgeNGramTokenFilterV2', '#Microsoft.Azure.Search.ElisionTokenFilter': 'ElisionTokenFilter', '#Microsoft.Azure.Search.KeepTokenFilter': 'KeepTokenFilter', '#Microsoft.Azure.Search.KeywordMarkerTokenFilter': 'KeywordMarkerTokenFilter', '#Microsoft.Azure.Search.LengthTokenFilter': 'LengthTokenFilter', '#Microsoft.Azure.Search.LimitTokenFilter': 'LimitTokenFilter', '#Microsoft.Azure.Search.NGramTokenFilter': 'NGramTokenFilter', '#Microsoft.Azure.Search.NGramTokenFilterV2': 'NGramTokenFilterV2', '#Microsoft.Azure.Search.PatternCaptureTokenFilter': 'PatternCaptureTokenFilter', '#Microsoft.Azure.Search.PatternReplaceTokenFilter': 'PatternReplaceTokenFilter', '#Microsoft.Azure.Search.PhoneticTokenFilter': 'PhoneticTokenFilter', '#Microsoft.Azure.Search.ShingleTokenFilter': 'ShingleTokenFilter', '#Microsoft.Azure.Search.SnowballTokenFilter': 'SnowballTokenFilter', '#Microsoft.Azure.Search.StemmerOverrideTokenFilter': 'StemmerOverrideTokenFilter', '#Microsoft.Azure.Search.StemmerTokenFilter': 'StemmerTokenFilter', '#Microsoft.Azure.Search.StopwordsTokenFilter': 'StopwordsTokenFilter', '#Microsoft.Azure.Search.SynonymTokenFilter': 'SynonymTokenFilter', '#Microsoft.Azure.Search.TruncateTokenFilter': 'TruncateTokenFilter', '#Microsoft.Azure.Search.UniqueTokenFilter': 'UniqueTokenFilter', '#Microsoft.Azure.Search.WordDelimiterTokenFilter': 'WordDelimiterTokenFilter'}
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
"""
super(TokenFilter, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
class AsciiFoldingTokenFilter(TokenFilter):
"""Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar preserve_original: A value indicating whether the original token will be kept. Default is
false.
:vartype preserve_original: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword preserve_original: A value indicating whether the original token will be kept. Default
is false.
:paramtype preserve_original: bool
"""
super(AsciiFoldingTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' # type: str
self.preserve_original = kwargs.get('preserve_original', False)
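# Illustration only: concrete filters fix the odata_type discriminator
# themselves, so callers just pick a (hypothetical) name and any options, e.g.
#
#   f = AsciiFoldingTokenFilter(name='my_ascii_folding', preserve_original=True)
#   # f.odata_type == '#Microsoft.Azure.Search.AsciiFoldingTokenFilter'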
class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model):
"""Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault.
All required parameters must be populated in order to send to Azure.
:ivar application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
:vartype application_id: str
:ivar application_secret: The authentication key of the specified AAD application.
:vartype application_secret: str
"""
_validation = {
'application_id': {'required': True},
}
_attribute_map = {
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_secret': {'key': 'applicationSecret', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
:paramtype application_id: str
:keyword application_secret: The authentication key of the specified AAD application.
:paramtype application_secret: str
"""
super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs)
self.application_id = kwargs['application_id']
self.application_secret = kwargs.get('application_secret', None)
class SearchIndexerSkill(msrest.serialization.Model):
"""Base type for skills.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureMachineLearningSkill, WebApiSkill, CustomEntityLookupSkill, EntityRecognitionSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, PIIDetectionSkill, SentimentSkill, SplitSkill, TextTranslationSkill, EntityLinkingSkill, EntityRecognitionSkillV3, SentimentSkillV3, ConditionalSkill, DocumentExtractionSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Skills.Custom.AmlSkill': 'AzureMachineLearningSkill', '#Microsoft.Skills.Custom.WebApiSkill': 'WebApiSkill', '#Microsoft.Skills.Text.CustomEntityLookupSkill': 'CustomEntityLookupSkill', '#Microsoft.Skills.Text.EntityRecognitionSkill': 'EntityRecognitionSkill', '#Microsoft.Skills.Text.KeyPhraseExtractionSkill': 'KeyPhraseExtractionSkill', '#Microsoft.Skills.Text.LanguageDetectionSkill': 'LanguageDetectionSkill', '#Microsoft.Skills.Text.MergeSkill': 'MergeSkill', '#Microsoft.Skills.Text.PIIDetectionSkill': 'PIIDetectionSkill', '#Microsoft.Skills.Text.SentimentSkill': 'SentimentSkill', '#Microsoft.Skills.Text.SplitSkill': 'SplitSkill', '#Microsoft.Skills.Text.TranslationSkill': 'TextTranslationSkill', '#Microsoft.Skills.Text.V3.EntityLinkingSkill': 'EntityLinkingSkill', '#Microsoft.Skills.Text.V3.EntityRecognitionSkill': 'EntityRecognitionSkillV3', '#Microsoft.Skills.Text.V3.SentimentSkill': 'SentimentSkillV3', '#Microsoft.Skills.Util.ConditionalSkill': 'ConditionalSkill', '#Microsoft.Skills.Util.DocumentExtractionSkill': 'DocumentExtractionSkill', '#Microsoft.Skills.Util.ShaperSkill': 'ShaperSkill', '#Microsoft.Skills.Vision.ImageAnalysisSkill': 'ImageAnalysisSkill', '#Microsoft.Skills.Vision.OcrSkill': 'OcrSkill'}
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
super(SearchIndexerSkill, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.context = kwargs.get('context', None)
self.inputs = kwargs['inputs']
self.outputs = kwargs['outputs']
class AzureMachineLearningSkill(SearchIndexerSkill):
"""The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar scoring_uri: (Required for no authentication or key authentication) The scoring URI of
the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed.
:vartype scoring_uri: str
:ivar authentication_key: (Required for key authentication) The key for the AML service.
:vartype authentication_key: str
:ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID
of the AML service. It should be in the format
subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}.
:vartype resource_id: str
:ivar timeout: (Optional) When specified, indicates the timeout for the http client making the
API call.
:vartype timeout: ~datetime.timedelta
:ivar region: (Optional for token authentication). The region the AML service is deployed in.
:vartype region: str
:ivar degree_of_parallelism: (Optional) When specified, indicates the number of calls the
 indexer will make in parallel to the endpoint you have provided. You can decrease this value if
 your endpoint is failing under too high a request load, or raise it if your endpoint can accept
 more requests and you would like to increase indexer performance. If not set, a default value
 of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1.
:vartype degree_of_parallelism: int
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'scoring_uri': {'key': 'uri', 'type': 'str'},
'authentication_key': {'key': 'key', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'region': {'key': 'region', 'type': 'str'},
'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword scoring_uri: (Required for no authentication or key authentication) The scoring URI of
the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed.
:paramtype scoring_uri: str
:keyword authentication_key: (Required for key authentication) The key for the AML service.
:paramtype authentication_key: str
:keyword resource_id: (Required for token authentication). The Azure Resource Manager resource
ID of the AML service. It should be in the format
subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}.
:paramtype resource_id: str
:keyword timeout: (Optional) When specified, indicates the timeout for the http client making
the API call.
:paramtype timeout: ~datetime.timedelta
:keyword region: (Optional for token authentication). The region the AML service is deployed
in.
:paramtype region: str
:keyword degree_of_parallelism: (Optional) When specified, indicates the number of calls the
 indexer will make in parallel to the endpoint you have provided. You can decrease this value if
 your endpoint is failing under too high a request load, or raise it if your endpoint can accept
 more requests and you would like to increase indexer performance. If not set, a default value
 of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1.
:paramtype degree_of_parallelism: int
"""
super(AzureMachineLearningSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Custom.AmlSkill' # type: str
self.scoring_uri = kwargs.get('scoring_uri', None)
self.authentication_key = kwargs.get('authentication_key', None)
self.resource_id = kwargs.get('resource_id', None)
self.timeout = kwargs.get('timeout', None)
self.region = kwargs.get('region', None)
self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None)
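# Illustrative sketch (not part of the generated API): building an AzureMachineLearningSkill that
# posts document content to an AML scoring endpoint. The endpoint URI, input source path, and
# output names below are assumptions for demonstration only; InputFieldMappingEntry and
# OutputFieldMappingEntry are the mapping models defined elsewhere in this module.
def _example_aml_skill():
    return AzureMachineLearningSkill(
        name="aml-skill",
        description="Calls a custom AML model over document content",
        context="/document",
        scoring_uri="https://example-aml-endpoint.azurewebsites.net/score",  # assumed endpoint
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="prediction", target_name="amlPrediction")],
        degree_of_parallelism=5,  # documented default
    )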
class Similarity(msrest.serialization.Model):
"""Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: BM25Similarity, ClassicSimilarity.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Constant filled by server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.BM25Similarity': 'BM25Similarity', '#Microsoft.Azure.Search.ClassicSimilarity': 'ClassicSimilarity'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(Similarity, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class BM25Similarity(Similarity):
"""Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter).
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Constant filled by server.
:vartype odata_type: str
:ivar k1: This property controls the scaling function between the term frequency of each
 matching term and the final relevance score of a document-query pair. By default, a value of
 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
:vartype k1: float
:ivar b: This property controls how the length of a document affects the relevance score. By
default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
while a value of 1.0 means the score is fully normalized by the length of the document.
:vartype b: float
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'k1': {'key': 'k1', 'type': 'float'},
'b': {'key': 'b', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
"""
:keyword k1: This property controls the scaling function between the term frequency of each
 matching term and the final relevance score of a document-query pair. By default, a value of
 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
:paramtype k1: float
:keyword b: This property controls how the length of a document affects the relevance score. By
default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
while a value of 1.0 means the score is fully normalized by the length of the document.
:paramtype b: float
"""
super(BM25Similarity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' # type: str
self.k1 = kwargs.get('k1', None)
self.b = kwargs.get('b', None)
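# Illustrative sketch (not part of the generated API): a BM25 similarity with slightly stronger
# term-frequency scaling and the documented default length normalization. Values are examples only.
def _example_bm25_similarity():
    return BM25Similarity(
        k1=1.5,   # raise term-frequency saturation above the 1.2 default
        b=0.75,   # documented default length normalization
    )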
class CharFilter(msrest.serialization.Model):
"""Base type for character filters.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: MappingCharFilter, PatternReplaceCharFilter.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.MappingCharFilter': 'MappingCharFilter', '#Microsoft.Azure.Search.PatternReplaceCharFilter': 'PatternReplaceCharFilter'}
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
"""
super(CharFilter, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
class CjkBigramTokenFilter(TokenFilter):
"""Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar ignore_scripts: The scripts to ignore.
:vartype ignore_scripts: list[str or
~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts]
:ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if
true), or just bigrams (if false). Default is false.
:vartype output_unigrams: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'ignore_scripts': {'key': 'ignoreScripts', 'type': '[str]'},
'output_unigrams': {'key': 'outputUnigrams', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword ignore_scripts: The scripts to ignore.
:paramtype ignore_scripts: list[str or
~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts]
:keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if
true), or just bigrams (if false). Default is false.
:paramtype output_unigrams: bool
"""
super(CjkBigramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' # type: str
self.ignore_scripts = kwargs.get('ignore_scripts', None)
self.output_unigrams = kwargs.get('output_unigrams', False)
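# Illustrative sketch (not part of the generated API): a CJK bigram token filter that ignores Han
# script and also emits unigrams. The script value assumes the CjkBigramTokenFilterScripts
# enumeration strings ("han", "hiragana", "katakana", "hangul").
def _example_cjk_bigram_token_filter():
    return CjkBigramTokenFilter(
        name="my_cjk_bigram",
        ignore_scripts=["han"],  # assumed enum string value
        output_unigrams=True,    # emit unigrams in addition to bigrams
    )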
class ClassicSimilarity(Similarity):
"""Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Constant filled by server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ClassicSimilarity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' # type: str
class LexicalTokenizer(msrest.serialization.Model):
"""Base type for tokenizers.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, UaxUrlEmailTokenizer.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'LuceneStandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'LuceneStandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'}
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
"""
super(LexicalTokenizer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
class ClassicTokenizer(LexicalTokenizer):
"""Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:vartype max_token_length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:paramtype max_token_length: int
"""
super(ClassicTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
class CognitiveServicesAccount(msrest.serialization.Model):
"""Base type for describing any cognitive service resource attached to a skillset.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CognitiveServicesAccountKey, DefaultCognitiveServicesAccount.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
:vartype odata_type: str
:ivar description: Description of the cognitive service resource attached to a skillset.
:vartype description: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.CognitiveServicesByKey': 'CognitiveServicesAccountKey', '#Microsoft.Azure.Search.DefaultCognitiveServices': 'DefaultCognitiveServicesAccount'}
}
def __init__(
self,
**kwargs
):
"""
:keyword description: Description of the cognitive service resource attached to a skillset.
:paramtype description: str
"""
super(CognitiveServicesAccount, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.description = kwargs.get('description', None)
class CognitiveServicesAccountKey(CognitiveServicesAccount):
"""A cognitive service resource provisioned with a key that is attached to a skillset.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
:vartype odata_type: str
:ivar description: Description of the cognitive service resource attached to a skillset.
:vartype description: str
:ivar key: Required. The key used to provision the cognitive service resource attached to a
skillset.
:vartype key: str
"""
_validation = {
'odata_type': {'required': True},
'key': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword description: Description of the cognitive service resource attached to a skillset.
:paramtype description: str
:keyword key: Required. The key used to provision the cognitive service resource attached to a
skillset.
:paramtype key: str
"""
super(CognitiveServicesAccountKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' # type: str
self.key = kwargs['key']
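# Illustrative sketch (not part of the generated API): attaching a billable Cognitive Services
# resource to a skillset by key. The key value is a placeholder, not a real credential.
def _example_cognitive_services_key():
    return CognitiveServicesAccountKey(
        description="All-in-one Cognitive Services resource for AI enrichment",
        key="<your-cognitive-services-key>",  # placeholder
    )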
class CommonGramTokenFilter(TokenFilter):
"""Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar common_words: Required. The set of common words.
:vartype common_words: list[str]
:ivar ignore_case: A value indicating whether matching of common words will be case insensitive.
 Default is false.
:vartype ignore_case: bool
:ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in
query mode, the token filter generates bigrams and then removes common words and single terms
followed by a common word. Default is false.
:vartype use_query_mode: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'common_words': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'common_words': {'key': 'commonWords', 'type': '[str]'},
'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
'use_query_mode': {'key': 'queryMode', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword common_words: Required. The set of common words.
:paramtype common_words: list[str]
:keyword ignore_case: A value indicating whether matching of common words will be case
 insensitive. Default is false.
:paramtype ignore_case: bool
:keyword use_query_mode: A value that indicates whether the token filter is in query mode. When
in query mode, the token filter generates bigrams and then removes common words and single
terms followed by a common word. Default is false.
:paramtype use_query_mode: bool
"""
super(CommonGramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' # type: str
self.common_words = kwargs['common_words']
self.ignore_case = kwargs.get('ignore_case', False)
self.use_query_mode = kwargs.get('use_query_mode', False)
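# Illustrative sketch (not part of the generated API): a common-gram token filter that forms
# bigrams around a small list of frequent words, matched case-insensitively. The word list is
# example data.
def _example_common_gram_token_filter():
    return CommonGramTokenFilter(
        name="my_common_grams",
        common_words=["the", "a", "an", "of"],
        ignore_case=True,
        use_query_mode=False,
    )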
class ConditionalSkill(SearchIndexerSkill):
"""A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
super(ConditionalSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' # type: str
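# Illustrative sketch (not part of the generated API): a ConditionalSkill that falls back to a
# default language when the detected language is missing. The input names ("condition",
# "whenTrue", "whenFalse") and the expression syntax in the source values are assumptions based
# on the documented usage of this skill, not taken from this module.
def _example_conditional_skill():
    return ConditionalSkill(
        context="/document",
        inputs=[
            InputFieldMappingEntry(name="condition", source="= $(/document/language) == null"),
            InputFieldMappingEntry(name="whenTrue", source="= 'en'"),
            InputFieldMappingEntry(name="whenFalse", source="/document/language"),
        ],
        outputs=[OutputFieldMappingEntry(name="output", target_name="languageWithDefault")],
    )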
class CorsOptions(msrest.serialization.Model):
"""Defines options to control Cross-Origin Resource Sharing (CORS) for an index.
All required parameters must be populated in order to send to Azure.
:ivar allowed_origins: Required. The list of origins from which JavaScript code will be granted
access to your index. Can contain a list of hosts of the form
{protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not
recommended).
:vartype allowed_origins: list[str]
:ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight
responses. Defaults to 5 minutes.
:vartype max_age_in_seconds: long
"""
_validation = {
'allowed_origins': {'required': True},
}
_attribute_map = {
'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'},
'max_age_in_seconds': {'key': 'maxAgeInSeconds', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
:keyword allowed_origins: Required. The list of origins from which JavaScript code will be
granted access to your index. Can contain a list of hosts of the form
{protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not
recommended).
:paramtype allowed_origins: list[str]
:keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight
responses. Defaults to 5 minutes.
:paramtype max_age_in_seconds: long
"""
super(CorsOptions, self).__init__(**kwargs)
self.allowed_origins = kwargs['allowed_origins']
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
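# Illustrative sketch (not part of the generated API): CORS options that allow a single origin and
# cache preflight responses for five minutes. The origin is an example value.
def _example_cors_options():
    return CorsOptions(
        allowed_origins=["https://www.example.com"],
        max_age_in_seconds=300,
    )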
class LexicalAnalyzer(msrest.serialization.Model):
"""Base type for analyzers.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'LuceneStandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'}
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the analyzer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
"""
super(LexicalAnalyzer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
class CustomAnalyzer(LexicalAnalyzer):
"""Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
"edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
"microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
"standard_v2", "uax_url_email", "whitespace".
:vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:ivar token_filters: A list of token filters used to filter out or modify the tokens generated
by a tokenizer. For example, you can specify a lowercase filter that converts all characters to
lowercase. The filters are run in the order in which they are listed.
:vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:ivar char_filters: A list of character filters used to prepare input text before it is
processed by the tokenizer. For instance, they can replace certain characters or symbols. The
filters are run in the order in which they are listed.
:vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'tokenizer': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tokenizer': {'key': 'tokenizer', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the analyzer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
"edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
"microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
"standard_v2", "uax_url_email", "whitespace".
:paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:keyword token_filters: A list of token filters used to filter out or modify the tokens
generated by a tokenizer. For example, you can specify a lowercase filter that converts all
characters to lowercase. The filters are run in the order in which they are listed.
:paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:keyword char_filters: A list of character filters used to prepare input text before it is
processed by the tokenizer. For instance, they can replace certain characters or symbols. The
filters are run in the order in which they are listed.
:paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
super(CustomAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' # type: str
self.tokenizer = kwargs['tokenizer']
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
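# Illustrative sketch (not part of the generated API): a custom analyzer that combines the
# standard tokenizer with lowercase and ASCII-folding token filters. "standard_v2" comes from
# the tokenizer values documented above; the token filter names assume built-in TokenFilterName
# values.
def _example_custom_analyzer():
    return CustomAnalyzer(
        name="my_custom_analyzer",
        tokenizer="standard_v2",
        token_filters=["lowercase", "asciifolding"],  # run in the order listed
    )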
class CustomEntity(msrest.serialization.Model):
"""An object that contains information about the matches that were found, and related metadata.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The top-level entity descriptor. Matches in the skill output will be
grouped by this name, and it should represent the "normalized" form of the text being found.
:vartype name: str
:ivar description: This field can be used as a passthrough for custom metadata about the
matched text(s). The value of this field will appear with every match of its entity in the
skill output.
:vartype description: str
:ivar type: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
:vartype type: str
:ivar subtype: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
:vartype subtype: str
:ivar id: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
:vartype id: str
:ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
entity name should be sensitive to character casing. Sample case insensitive matches of
"Microsoft" could be: microsoft, microSoft, MICROSOFT.
:vartype case_sensitive: bool
:ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
entity name should be sensitive to accent.
:vartype accent_sensitive: bool
:ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of
divergent characters that would still constitute a match with the entity name. The smallest
possible fuzziness for any given match is returned. For instance, if the edit distance is set
to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case
sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but
otherwise do.
:vartype fuzzy_edit_distance: int
:ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It
 can be used to change the default value of all aliases caseSensitive values.
:vartype default_case_sensitive: bool
:ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity.
 It can be used to change the default value of all aliases accentSensitive values.
:vartype default_accent_sensitive: bool
:ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
entity. It can be used to change the default value of all aliases fuzzyEditDistance values.
:vartype default_fuzzy_edit_distance: int
:ivar aliases: An array of complex objects that can be used to specify alternative spellings or
synonyms to the root entity name.
:vartype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'subtype': {'key': 'subtype', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'case_sensitive': {'key': 'caseSensitive', 'type': 'bool'},
'accent_sensitive': {'key': 'accentSensitive', 'type': 'bool'},
'fuzzy_edit_distance': {'key': 'fuzzyEditDistance', 'type': 'int'},
'default_case_sensitive': {'key': 'defaultCaseSensitive', 'type': 'bool'},
'default_accent_sensitive': {'key': 'defaultAccentSensitive', 'type': 'bool'},
'default_fuzzy_edit_distance': {'key': 'defaultFuzzyEditDistance', 'type': 'int'},
'aliases': {'key': 'aliases', 'type': '[CustomEntityAlias]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The top-level entity descriptor. Matches in the skill output will be
grouped by this name, and it should represent the "normalized" form of the text being found.
:paramtype name: str
:keyword description: This field can be used as a passthrough for custom metadata about the
matched text(s). The value of this field will appear with every match of its entity in the
skill output.
:paramtype description: str
:keyword type: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
:paramtype type: str
:keyword subtype: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
:paramtype subtype: str
:keyword id: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
:paramtype id: str
:keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
entity name should be sensitive to character casing. Sample case insensitive matches of
"Microsoft" could be: microsoft, microSoft, MICROSOFT.
:paramtype case_sensitive: bool
:keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with
the entity name should be sensitive to accent.
:paramtype accent_sensitive: bool
:keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number
of divergent characters that would still constitute a match with the entity name. The smallest
possible fuzziness for any given match is returned. For instance, if the edit distance is set
to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case
sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but
otherwise do.
:paramtype fuzzy_edit_distance: int
:keyword default_case_sensitive: Changes the default case sensitivity value for this entity.
 It can be used to change the default value of all aliases caseSensitive values.
:paramtype default_case_sensitive: bool
:keyword default_accent_sensitive: Changes the default accent sensitivity value for this
 entity. It can be used to change the default value of all aliases accentSensitive values.
:paramtype default_accent_sensitive: bool
:keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
entity. It can be used to change the default value of all aliases fuzzyEditDistance values.
:paramtype default_fuzzy_edit_distance: int
:keyword aliases: An array of complex objects that can be used to specify alternative spellings
or synonyms to the root entity name.
:paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
"""
super(CustomEntity, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.type = kwargs.get('type', None)
self.subtype = kwargs.get('subtype', None)
self.id = kwargs.get('id', None)
self.case_sensitive = kwargs.get('case_sensitive', None)
self.accent_sensitive = kwargs.get('accent_sensitive', None)
self.fuzzy_edit_distance = kwargs.get('fuzzy_edit_distance', None)
self.default_case_sensitive = kwargs.get('default_case_sensitive', None)
self.default_accent_sensitive = kwargs.get('default_accent_sensitive', None)
self.default_fuzzy_edit_distance = kwargs.get('default_fuzzy_edit_distance', None)
self.aliases = kwargs.get('aliases', None)
class CustomEntityAlias(msrest.serialization.Model):
"""A complex object that can be used to specify alternative spellings or synonyms to the root entity name.
All required parameters must be populated in order to send to Azure.
:ivar text: Required. The text of the alias.
:vartype text: str
:ivar case_sensitive: Determines whether the alias is case sensitive.
:vartype case_sensitive: bool
:ivar accent_sensitive: Determines whether the alias is accent sensitive.
:vartype accent_sensitive: bool
:ivar fuzzy_edit_distance: Determines the fuzzy edit distance of the alias.
:vartype fuzzy_edit_distance: int
"""
_validation = {
'text': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'case_sensitive': {'key': 'caseSensitive', 'type': 'bool'},
'accent_sensitive': {'key': 'accentSensitive', 'type': 'bool'},
'fuzzy_edit_distance': {'key': 'fuzzyEditDistance', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword text: Required. The text of the alias.
:paramtype text: str
:keyword case_sensitive: Determine if the alias is case sensitive.
:paramtype case_sensitive: bool
:keyword accent_sensitive: Determine if the alias is accent sensitive.
:paramtype accent_sensitive: bool
:keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
:paramtype fuzzy_edit_distance: int
"""
super(CustomEntityAlias, self).__init__(**kwargs)
self.text = kwargs['text']
self.case_sensitive = kwargs.get('case_sensitive', None)
self.accent_sensitive = kwargs.get('accent_sensitive', None)
self.fuzzy_edit_distance = kwargs.get('fuzzy_edit_distance', None)
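# Illustrative sketch (not part of the generated API): a custom entity with aliases, matched with
# a small fuzzy edit distance. All entity and alias values are example data.
def _example_custom_entity():
    return CustomEntity(
        name="Microsoft",
        description="The Microsoft Corporation",
        aliases=[
            CustomEntityAlias(text="MSFT", case_sensitive=True),
            CustomEntityAlias(text="Microsoft Corp", fuzzy_edit_distance=1),
        ],
        default_fuzzy_edit_distance=0,  # exact matching unless an alias overrides it
    )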
class CustomEntityLookupSkill(SearchIndexerSkill):
"""A skill looks for text from a custom, user-defined list of words and phrases.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
:vartype default_language_code: str or
~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage
:ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to
match against. This entity definition is read at the beginning of an indexer run. Any updates
to this file during an indexer run will not take effect until subsequent runs. This config must
be accessible over HTTPS.
:vartype entities_definition_uri: str
:ivar inline_entities_definition: The inline CustomEntity definition.
:vartype inline_entities_definition: list[~azure.search.documents.indexes.models.CustomEntity]
:ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not
set in CustomEntity, this value will be the default value.
:vartype global_default_case_sensitive: bool
:ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is
not set in CustomEntity, this value will be the default value.
:vartype global_default_accent_sensitive: bool
:ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
FuzzyEditDistance is not set in CustomEntity, this value will be the default value.
:vartype global_default_fuzzy_edit_distance: int
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'entities_definition_uri': {'key': 'entitiesDefinitionUri', 'type': 'str'},
'inline_entities_definition': {'key': 'inlineEntitiesDefinition', 'type': '[CustomEntity]'},
'global_default_case_sensitive': {'key': 'globalDefaultCaseSensitive', 'type': 'bool'},
'global_default_accent_sensitive': {'key': 'globalDefaultAccentSensitive', 'type': 'bool'},
'global_default_fuzzy_edit_distance': {'key': 'globalDefaultFuzzyEditDistance', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage
:keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to
match against. This entity definition is read at the beginning of an indexer run. Any updates
to this file during an indexer run will not take effect until subsequent runs. This config must
be accessible over HTTPS.
:paramtype entities_definition_uri: str
:keyword inline_entities_definition: The inline CustomEntity definition.
:paramtype inline_entities_definition:
list[~azure.search.documents.indexes.models.CustomEntity]
:keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is
not set in CustomEntity, this value will be the default value.
:paramtype global_default_case_sensitive: bool
:keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive
is not set in CustomEntity, this value will be the default value.
:paramtype global_default_accent_sensitive: bool
:keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
FuzzyEditDistance is not set in CustomEntity, this value will be the default value.
:paramtype global_default_fuzzy_edit_distance: int
"""
super(CustomEntityLookupSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.CustomEntityLookupSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.entities_definition_uri = kwargs.get('entities_definition_uri', None)
self.inline_entities_definition = kwargs.get('inline_entities_definition', None)
self.global_default_case_sensitive = kwargs.get('global_default_case_sensitive', None)
self.global_default_accent_sensitive = kwargs.get('global_default_accent_sensitive', None)
self.global_default_fuzzy_edit_distance = kwargs.get('global_default_fuzzy_edit_distance', None)
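# Illustrative sketch (not part of the generated API): wiring the custom entity sketched above
# into a CustomEntityLookupSkill with an inline definition. Input/output names are examples.
def _example_custom_entity_lookup_skill():
    return CustomEntityLookupSkill(
        context="/document",
        default_language_code="en",
        inline_entities_definition=[_example_custom_entity()],
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="entities", target_name="matchedEntities")],
    )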
class LexicalNormalizer(msrest.serialization.Model):
"""Base type for normalizers.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the normalizer.
:vartype odata_type: str
:ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding',
'standard', 'lowercase', 'uppercase', or 'elision'.
:vartype name: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword odata_type: Required. Identifies the concrete type of the normalizer.
:paramtype odata_type: str
:keyword name: Required. The name of the normalizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
:paramtype name: str
"""
super(LexicalNormalizer, self).__init__(**kwargs)
self.odata_type = kwargs['odata_type']
self.name = kwargs['name']
class CustomNormalizer(LexicalNormalizer):
"""Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the normalizer.
:vartype odata_type: str
:ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding',
'standard', 'lowercase', 'uppercase', or 'elision'.
:vartype name: str
:ivar token_filters: A list of token filters used to filter out or modify the input token. For
example, you can specify a lowercase filter that converts all characters to lowercase. The
filters are run in the order in which they are listed.
:vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:ivar char_filters: A list of character filters used to prepare input text before it is
processed. For instance, they can replace certain characters or symbols. The filters are run in
the order in which they are listed.
:vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword odata_type: Required. Identifies the concrete type of the normalizer.
:paramtype odata_type: str
:keyword name: Required. The name of the normalizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
:paramtype name: str
:keyword token_filters: A list of token filters used to filter out or modify the input token.
For example, you can specify a lowercase filter that converts all characters to lowercase. The
filters are run in the order in which they are listed.
:paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:keyword char_filters: A list of character filters used to prepare input text before it is
processed. For instance, they can replace certain characters or symbols. The filters are run in
the order in which they are listed.
:paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
super(CustomNormalizer, self).__init__(**kwargs)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
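# Illustrative sketch (not part of the generated API): a custom normalizer that lowercases and
# ASCII-folds values for filterable/sortable/facetable fields. Note that LexicalNormalizer
# requires the odata_type keyword; the OData type string below is an assumption about the
# service's custom normalizer identifier.
def _example_custom_normalizer():
    return CustomNormalizer(
        odata_type="#Microsoft.Azure.Search.CustomNormalizer",  # assumed OData type string
        name="my_normalizer",
        token_filters=["lowercase", "asciifolding"],
    )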
class DataChangeDetectionPolicy(msrest.serialization.Model):
"""Base type for data change detection policies.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: HighWaterMarkChangeDetectionPolicy, SqlIntegratedChangeTrackingPolicy.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy': 'HighWaterMarkChangeDetectionPolicy', '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy': 'SqlIntegratedChangeTrackingPolicy'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(DataChangeDetectionPolicy, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class DataDeletionDetectionPolicy(msrest.serialization.Model):
"""Base type for data deletion detection policies.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SoftDeleteColumnDeletionDetectionPolicy.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the data deletion detection
policy.Constant filled by server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy': 'SoftDeleteColumnDeletionDetectionPolicy'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(DataDeletionDetectionPolicy, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class DataSourceCredentials(msrest.serialization.Model):
"""Represents credentials that can be used to connect to a datasource.
:ivar connection_string: The connection string for the datasource. Set to ':code:`<unchanged>`'
if you do not want the connection string updated.
:vartype connection_string: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword connection_string: The connection string for the datasource. Set to
':code:`<unchanged>`' if you do not want the connection string updated.
:paramtype connection_string: str
"""
super(DataSourceCredentials, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
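# Illustrative sketch (not part of the generated API): datasource credentials using the special
# '<unchanged>' sentinel described above, so an update does not overwrite the stored connection
# string.
def _example_datasource_credentials():
    return DataSourceCredentials(connection_string="<unchanged>")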
class DefaultCognitiveServicesAccount(CognitiveServicesAccount):
"""An empty object that represents the default cognitive service resource for a skillset.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
:vartype odata_type: str
:ivar description: Description of the cognitive service resource attached to a skillset.
:vartype description: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword description: Description of the cognitive service resource attached to a skillset.
:paramtype description: str
"""
super(DefaultCognitiveServicesAccount, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' # type: str
class DictionaryDecompounderTokenFilter(TokenFilter):
"""Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar word_list: Required. The list of words to match against.
:vartype word_list: list[str]
:ivar min_word_size: The minimum word size. Only words longer than this get processed. Default
is 5. Maximum is 300.
:vartype min_word_size: int
:ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted.
Default is 2. Maximum is 300.
:vartype min_subword_size: int
:ivar max_subword_size: The maximum subword size. Only subwords shorter than this are
outputted. Default is 15. Maximum is 300.
:vartype max_subword_size: int
:ivar only_longest_match: A value indicating whether to add only the longest matching subword
to the output. Default is false.
:vartype only_longest_match: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'word_list': {'required': True},
'min_word_size': {'maximum': 300},
'min_subword_size': {'maximum': 300},
'max_subword_size': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'word_list': {'key': 'wordList', 'type': '[str]'},
'min_word_size': {'key': 'minWordSize', 'type': 'int'},
'min_subword_size': {'key': 'minSubwordSize', 'type': 'int'},
'max_subword_size': {'key': 'maxSubwordSize', 'type': 'int'},
'only_longest_match': {'key': 'onlyLongestMatch', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword word_list: Required. The list of words to match against.
:paramtype word_list: list[str]
:keyword min_word_size: The minimum word size. Only words longer than this get processed.
Default is 5. Maximum is 300.
:paramtype min_word_size: int
:keyword min_subword_size: The minimum subword size. Only subwords longer than this are
outputted. Default is 2. Maximum is 300.
:paramtype min_subword_size: int
:keyword max_subword_size: The maximum subword size. Only subwords shorter than this are
outputted. Default is 15. Maximum is 300.
:paramtype max_subword_size: int
:keyword only_longest_match: A value indicating whether to add only the longest matching
subword to the output. Default is false.
:paramtype only_longest_match: bool
"""
super(DictionaryDecompounderTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' # type: str
self.word_list = kwargs['word_list']
self.min_word_size = kwargs.get('min_word_size', 5)
self.min_subword_size = kwargs.get('min_subword_size', 2)
self.max_subword_size = kwargs.get('max_subword_size', 15)
self.only_longest_match = kwargs.get('only_longest_match', False)
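# Usage sketch (illustrative, not part of the generated module): building a decompounder filter
# with the keyword arguments documented above. The filter name and word list are hypothetical
# example values; only ``name`` and ``word_list`` are required, and the size limits show the
# model's defaults.
#
#   decompounder = DictionaryDecompounderTokenFilter(
#       name="german_decompounder",
#       word_list=["Donau", "dampf", "schiff", "fahrt"],
#       min_word_size=5,
#       min_subword_size=2,
#       max_subword_size=15,
#       only_longest_match=False,
#   )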
class ScoringFunction(msrest.serialization.Model):
"""Base type for functions that can modify document scores during ranking.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DistanceScoringFunction, FreshnessScoringFunction, MagnitudeScoringFunction, TagScoringFunction.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case. Constant filled by server.
:vartype type: str
:ivar field_name: Required. The name of the field used as input to the scoring function.
:vartype field_name: str
:ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
1.0.
:vartype boost: float
:ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
"""
_validation = {
'type': {'required': True},
'field_name': {'required': True},
'boost': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'field_name': {'key': 'fieldName', 'type': 'str'},
'boost': {'key': 'boost', 'type': 'float'},
'interpolation': {'key': 'interpolation', 'type': 'str'},
}
_subtype_map = {
'type': {'distance': 'DistanceScoringFunction', 'freshness': 'FreshnessScoringFunction', 'magnitude': 'MagnitudeScoringFunction', 'tag': 'TagScoringFunction'}
}
def __init__(
self,
**kwargs
):
"""
:keyword field_name: Required. The name of the field used as input to the scoring function.
:paramtype field_name: str
:keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
to 1.0.
:paramtype boost: float
:keyword interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:paramtype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
"""
super(ScoringFunction, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.field_name = kwargs['field_name']
self.boost = kwargs['boost']
self.interpolation = kwargs.get('interpolation', None)
class DistanceScoringFunction(ScoringFunction):
"""Defines a function that boosts scores based on distance from a geographic location.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case. Constant filled by server.
:vartype type: str
:ivar field_name: Required. The name of the field used as input to the scoring function.
:vartype field_name: str
:ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
1.0.
:vartype boost: float
:ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:ivar parameters: Required. Parameter values for the distance scoring function.
:vartype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
"""
_validation = {
'type': {'required': True},
'field_name': {'required': True},
'boost': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'field_name': {'key': 'fieldName', 'type': 'str'},
'boost': {'key': 'boost', 'type': 'float'},
'interpolation': {'key': 'interpolation', 'type': 'str'},
'parameters': {'key': 'distance', 'type': 'DistanceScoringParameters'},
}
def __init__(
self,
**kwargs
):
"""
:keyword field_name: Required. The name of the field used as input to the scoring function.
:paramtype field_name: str
:keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
to 1.0.
:paramtype boost: float
:keyword interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:paramtype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:keyword parameters: Required. Parameter values for the distance scoring function.
:paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
"""
super(DistanceScoringFunction, self).__init__(**kwargs)
self.type = 'distance' # type: str
self.parameters = kwargs['parameters']
class DistanceScoringParameters(msrest.serialization.Model):
"""Provides parameter values to a distance scoring function.
All required parameters must be populated in order to send to Azure.
:ivar reference_point_parameter: Required. The name of the parameter passed in search queries
to specify the reference location.
:vartype reference_point_parameter: str
:ivar boosting_distance: Required. The distance in kilometers from the reference location where
the boosting range ends.
:vartype boosting_distance: float
"""
_validation = {
'reference_point_parameter': {'required': True},
'boosting_distance': {'required': True},
}
_attribute_map = {
'reference_point_parameter': {'key': 'referencePointParameter', 'type': 'str'},
'boosting_distance': {'key': 'boostingDistance', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
"""
:keyword reference_point_parameter: Required. The name of the parameter passed in search
queries to specify the reference location.
:paramtype reference_point_parameter: str
:keyword boosting_distance: Required. The distance in kilometers from the reference location
where the boosting range ends.
:paramtype boosting_distance: float
"""
super(DistanceScoringParameters, self).__init__(**kwargs)
self.reference_point_parameter = kwargs['reference_point_parameter']
self.boosting_distance = kwargs['boosting_distance']
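# Usage sketch (illustrative): a distance scoring function is built from DistanceScoringParameters
# plus the common ScoringFunction keywords. The field and parameter names below are hypothetical;
# note that ``boost`` must be a positive number other than 1.0 and ``boosting_distance`` is in
# kilometers.
#
#   distance_boost = DistanceScoringFunction(
#       field_name="location",
#       boost=2.0,
#       interpolation="linear",
#       parameters=DistanceScoringParameters(
#           reference_point_parameter="currentLocation",
#           boosting_distance=10,
#       ),
#   )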
class DocumentExtractionSkill(SearchIndexerSkill):
"""A skill that extracts content from a file within the enrichment pipeline.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
:vartype parsing_mode: str
:ivar data_to_extract: The type of data to be extracted for the skill. Will be set to
'contentAndMetadata' if not defined.
:vartype data_to_extract: str
:ivar configuration: A dictionary of configurations for the skill.
:vartype configuration: dict[str, any]
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'parsing_mode': {'key': 'parsingMode', 'type': 'str'},
'data_to_extract': {'key': 'dataToExtract', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
:paramtype parsing_mode: str
:keyword data_to_extract: The type of data to be extracted for the skill. Will be set to
'contentAndMetadata' if not defined.
:paramtype data_to_extract: str
:keyword configuration: A dictionary of configurations for the skill.
:paramtype configuration: dict[str, any]
"""
super(DocumentExtractionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Util.DocumentExtractionSkill' # type: str
self.parsing_mode = kwargs.get('parsing_mode', None)
self.data_to_extract = kwargs.get('data_to_extract', None)
self.configuration = kwargs.get('configuration', None)
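# Usage sketch (illustrative): a minimal document extraction skill. It assumes the
# InputFieldMappingEntry/OutputFieldMappingEntry models defined elsewhere in this module accept
# ``name``/``source`` and ``name``/``target_name`` keywords; the names, paths and configuration
# key shown are hypothetical example values.
#
#   extraction_skill = DocumentExtractionSkill(
#       name="#1",
#       context="/document",
#       parsing_mode="default",
#       data_to_extract="contentAndMetadata",
#       configuration={"imageAction": "generateNormalizedImages"},
#       inputs=[InputFieldMappingEntry(name="file_data", source="/document/file_data")],
#       outputs=[OutputFieldMappingEntry(name="content", target_name="extracted_content")],
#   )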
class DocumentKeysOrIds(msrest.serialization.Model):
"""DocumentKeysOrIds.
:ivar document_keys: document keys to be reset.
:vartype document_keys: list[str]
:ivar datasource_document_ids: datasource document identifiers to be reset.
:vartype datasource_document_ids: list[str]
"""
_attribute_map = {
'document_keys': {'key': 'documentKeys', 'type': '[str]'},
'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword document_keys: document keys to be reset.
:paramtype document_keys: list[str]
:keyword datasource_document_ids: datasource document identifiers to be reset.
:paramtype datasource_document_ids: list[str]
"""
super(DocumentKeysOrIds, self).__init__(**kwargs)
self.document_keys = kwargs.get('document_keys', None)
self.datasource_document_ids = kwargs.get('datasource_document_ids', None)
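# Usage sketch (illustrative): the payload for selectively resetting documents on an indexer.
# The key and id values are hypothetical.
#
#   reset_payload = DocumentKeysOrIds(
#       document_keys=["doc-1", "doc-2"],
#       datasource_document_ids=["source-id-17"],
#   )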
class EdgeNGramTokenFilter(TokenFilter):
"""Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
:vartype min_gram: int
:ivar max_gram: The maximum n-gram length. Default is 2.
:vartype max_gram: int
:ivar side: Specifies which side of the input the n-gram should be generated from. Default is
"front". Possible values include: "front", "back".
:vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_gram': {'key': 'minGram', 'type': 'int'},
'max_gram': {'key': 'maxGram', 'type': 'int'},
'side': {'key': 'side', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
:paramtype min_gram: int
:keyword max_gram: The maximum n-gram length. Default is 2.
:paramtype max_gram: int
:keyword side: Specifies which side of the input the n-gram should be generated from. Default
is "front". Possible values include: "front", "back".
:paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
super(EdgeNGramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' # type: str
self.min_gram = kwargs.get('min_gram', 1)
self.max_gram = kwargs.get('max_gram', 2)
self.side = kwargs.get('side', None)
class EdgeNGramTokenFilterV2(TokenFilter):
"""Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
value of maxGram.
:vartype min_gram: int
:ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:vartype max_gram: int
:ivar side: Specifies which side of the input the n-gram should be generated from. Default is
"front". Possible values include: "front", "back".
:vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'min_gram': {'maximum': 300},
'max_gram': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_gram': {'key': 'minGram', 'type': 'int'},
'max_gram': {'key': 'maxGram', 'type': 'int'},
'side': {'key': 'side', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
the value of maxGram.
:paramtype min_gram: int
:keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:paramtype max_gram: int
:keyword side: Specifies which side of the input the n-gram should be generated from. Default
is "front". Possible values include: "front", "back".
:paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
super(EdgeNGramTokenFilterV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' # type: str
self.min_gram = kwargs.get('min_gram', 1)
self.max_gram = kwargs.get('max_gram', 2)
self.side = kwargs.get('side', None)
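# Usage sketch (illustrative): an edge n-gram filter that emits 2- to 10-character prefixes.
# The filter name is a hypothetical example; ``min_gram`` must stay below ``max_gram`` and both
# are capped at 300 for the V2 filter.
#
#   edge_ngram = EdgeNGramTokenFilterV2(
#       name="prefix_edge_ngram",
#       min_gram=2,
#       max_gram=10,
#       side="front",
#   )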
class EdgeNGramTokenizer(LexicalTokenizer):
"""Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
value of maxGram.
:vartype min_gram: int
:ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:vartype max_gram: int
:ivar token_chars: Character classes to keep in the tokens.
:vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'min_gram': {'maximum': 300},
'max_gram': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_gram': {'key': 'minGram', 'type': 'int'},
'max_gram': {'key': 'maxGram', 'type': 'int'},
'token_chars': {'key': 'tokenChars', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
the value of maxGram.
:paramtype min_gram: int
:keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:paramtype max_gram: int
:keyword token_chars: Character classes to keep in the tokens.
:paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
super(EdgeNGramTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' # type: str
self.min_gram = kwargs.get('min_gram', 1)
self.max_gram = kwargs.get('max_gram', 2)
self.token_chars = kwargs.get('token_chars', None)
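# Usage sketch (illustrative): an edge n-gram tokenizer restricted to letters and digits, a common
# building block for autocomplete-style analyzers. The tokenizer name is a hypothetical example;
# ``token_chars`` values come from the TokenCharacterKind enumeration.
#
#   autocomplete_tokenizer = EdgeNGramTokenizer(
#       name="autocomplete_edge_ngram",
#       min_gram=2,
#       max_gram=10,
#       token_chars=["letter", "digit"],
#   )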
class ElisionTokenFilter(TokenFilter):
"""Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar articles: The set of articles to remove.
:vartype articles: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'articles': {'key': 'articles', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword articles: The set of articles to remove.
:paramtype articles: list[str]
"""
super(ElisionTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' # type: str
self.articles = kwargs.get('articles', None)
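# Usage sketch (illustrative): stripping common French elisions so that "l'avion" indexes as
# "avion". The filter name and article list are hypothetical example values.
#
#   french_elision = ElisionTokenFilter(
#       name="french_elision",
#       articles=["l", "m", "t", "qu", "n", "s", "j"],
#   )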
class EntityLinkingSkill(SearchIndexerSkill):
"""Using the Text Analytics API, extracts linked entities from text.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
:vartype default_language_code: str
:ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:vartype minimum_precision: float
:ivar model_version: The version of the model to use when calling the Text Analytics service.
It will default to the latest available when not specified. We recommend you do not specify
this value unless absolutely necessary.
:vartype model_version: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
'minimum_precision': {'maximum': 1, 'minimum': 0},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'minimum_precision': {'key': 'minimumPrecision', 'type': 'float'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
:paramtype default_language_code: str
:keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:paramtype minimum_precision: float
:keyword model_version: The version of the model to use when calling the Text Analytics
service. It will default to the latest available when not specified. We recommend you do not
specify this value unless absolutely necessary.
:paramtype model_version: str
"""
super(EntityLinkingSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.EntityLinkingSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.minimum_precision = kwargs.get('minimum_precision', None)
self.model_version = kwargs.get('model_version', None)
class EntityRecognitionSkill(SearchIndexerSkill):
"""Text analytics entity recognition.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar categories: A list of entity categories that should be extracted.
:vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de",
"el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr".
:vartype default_language_code: str or
~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage
:ivar include_typeless_entities: Determines whether or not to include entities which are well
known but don't conform to a pre-defined type. If this configuration is not set (default), set
to null or set to false, entities which don't conform to one of the pre-defined types will not
be surfaced.
:vartype include_typeless_entities: bool
:ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:vartype minimum_precision: float
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'categories': {'key': 'categories', 'type': '[str]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'include_typeless_entities': {'key': 'includeTypelessEntities', 'type': 'bool'},
'minimum_precision': {'key': 'minimumPrecision', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword categories: A list of entity categories that should be extracted.
:paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de",
"el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage
:keyword include_typeless_entities: Determines whether or not to include entities which are
well known but don't conform to a pre-defined type. If this configuration is not set (default),
set to null or set to false, entities which don't conform to one of the pre-defined types will
not be surfaced.
:paramtype include_typeless_entities: bool
:keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:paramtype minimum_precision: float
"""
super(EntityRecognitionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' # type: str
self.categories = kwargs.get('categories', None)
self.default_language_code = kwargs.get('default_language_code', None)
self.include_typeless_entities = kwargs.get('include_typeless_entities', None)
self.minimum_precision = kwargs.get('minimum_precision', None)
class EntityRecognitionSkillV3(SearchIndexerSkill):
"""Using the Text Analytics API, extracts entities of different types from text.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar categories: A list of entity categories that should be extracted.
:vartype categories: list[str]
:ivar default_language_code: A value indicating which language code to use. Default is en.
:vartype default_language_code: str
:ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:vartype minimum_precision: float
:ivar model_version: The version of the model to use when calling the Text Analytics service.
It will default to the latest available when not specified. We recommend you do not specify
this value unless absolutely necessary.
:vartype model_version: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
'minimum_precision': {'maximum': 1, 'minimum': 0},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'categories': {'key': 'categories', 'type': '[str]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'minimum_precision': {'key': 'minimumPrecision', 'type': 'float'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword categories: A list of entity categories that should be extracted.
:paramtype categories: list[str]
:keyword default_language_code: A value indicating which language code to use. Default is en.
:paramtype default_language_code: str
:keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:paramtype minimum_precision: float
:keyword model_version: The version of the model to use when calling the Text Analytics
service. It will default to the latest available when not specified. We recommend you do not
specify this value unless absolutely necessary.
:paramtype model_version: str
"""
super(EntityRecognitionSkillV3, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.EntityRecognitionSkill' # type: str
self.categories = kwargs.get('categories', None)
self.default_language_code = kwargs.get('default_language_code', None)
self.minimum_precision = kwargs.get('minimum_precision', None)
self.model_version = kwargs.get('model_version', None)
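# Usage sketch (illustrative): the V3 entity recognition skill with a confidence threshold. It
# assumes the InputFieldMappingEntry/OutputFieldMappingEntry models accept ``name``/``source``
# and ``name``/``target_name`` keywords; the category names and paths are hypothetical examples.
#
#   entity_skill = EntityRecognitionSkillV3(
#       context="/document",
#       categories=["Person", "Location", "Organization"],
#       default_language_code="en",
#       minimum_precision=0.7,
#       inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
#       outputs=[OutputFieldMappingEntry(name="persons", target_name="people")],
#   )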
class FieldMapping(msrest.serialization.Model):
"""Defines a mapping between a field in a data source and a target field in an index.
All required parameters must be populated in order to send to Azure.
:ivar source_field_name: Required. The name of the field in the data source.
:vartype source_field_name: str
:ivar target_field_name: The name of the target field in the index. Same as the source field
name by default.
:vartype target_field_name: str
:ivar mapping_function: A function to apply to each source field value before indexing.
:vartype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
"""
_validation = {
'source_field_name': {'required': True},
}
_attribute_map = {
'source_field_name': {'key': 'sourceFieldName', 'type': 'str'},
'target_field_name': {'key': 'targetFieldName', 'type': 'str'},
'mapping_function': {'key': 'mappingFunction', 'type': 'FieldMappingFunction'},
}
def __init__(
self,
**kwargs
):
"""
:keyword source_field_name: Required. The name of the field in the data source.
:paramtype source_field_name: str
:keyword target_field_name: The name of the target field in the index. Same as the source field
name by default.
:paramtype target_field_name: str
:keyword mapping_function: A function to apply to each source field value before indexing.
:paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
"""
super(FieldMapping, self).__init__(**kwargs)
self.source_field_name = kwargs['source_field_name']
self.target_field_name = kwargs.get('target_field_name', None)
self.mapping_function = kwargs.get('mapping_function', None)
class FieldMappingFunction(msrest.serialization.Model):
"""Represents a function that transforms a value from a data source before indexing.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the field mapping function.
:vartype name: str
:ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each
value must be of a primitive type.
:vartype parameters: dict[str, any]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the field mapping function.
:paramtype name: str
:keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each
value must be of a primitive type.
:paramtype parameters: dict[str, any]
"""
super(FieldMappingFunction, self).__init__(**kwargs)
self.name = kwargs['name']
self.parameters = kwargs.get('parameters', None)
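# Usage sketch (illustrative): mapping a source column onto an index key field while applying a
# mapping function. "base64Encode" is one of the service's documented mapping functions; the
# field names are hypothetical examples.
#
#   key_mapping = FieldMapping(
#       source_field_name="Id",
#       target_field_name="key",
#       mapping_function=FieldMappingFunction(name="base64Encode"),
#   )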
class FreshnessScoringFunction(ScoringFunction):
"""Defines a function that boosts scores based on the value of a date-time field.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case. Constant filled by server.
:vartype type: str
:ivar field_name: Required. The name of the field used as input to the scoring function.
:vartype field_name: str
:ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
1.0.
:vartype boost: float
:ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:ivar parameters: Required. Parameter values for the freshness scoring function.
:vartype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
"""
_validation = {
'type': {'required': True},
'field_name': {'required': True},
'boost': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'field_name': {'key': 'fieldName', 'type': 'str'},
'boost': {'key': 'boost', 'type': 'float'},
'interpolation': {'key': 'interpolation', 'type': 'str'},
'parameters': {'key': 'freshness', 'type': 'FreshnessScoringParameters'},
}
def __init__(
self,
**kwargs
):
"""
:keyword field_name: Required. The name of the field used as input to the scoring function.
:paramtype field_name: str
:keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
to 1.0.
:paramtype boost: float
:keyword interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:paramtype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:keyword parameters: Required. Parameter values for the freshness scoring function.
:paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
"""
super(FreshnessScoringFunction, self).__init__(**kwargs)
self.type = 'freshness' # type: str
self.parameters = kwargs['parameters']
class FreshnessScoringParameters(msrest.serialization.Model):
"""Provides parameter values to a freshness scoring function.
All required parameters must be populated in order to send to Azure.
:ivar boosting_duration: Required. The expiration period after which boosting will stop for a
particular document.
:vartype boosting_duration: ~datetime.timedelta
"""
_validation = {
'boosting_duration': {'required': True},
}
_attribute_map = {
'boosting_duration': {'key': 'boostingDuration', 'type': 'duration'},
}
def __init__(
self,
**kwargs
):
"""
:keyword boosting_duration: Required. The expiration period after which boosting will stop for
a particular document.
:paramtype boosting_duration: ~datetime.timedelta
"""
super(FreshnessScoringParameters, self).__init__(**kwargs)
self.boosting_duration = kwargs['boosting_duration']
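# Usage sketch (illustrative): boosting documents updated within the last 30 days. It assumes a
# module-level ``import datetime``; the field name is a hypothetical example and ``boost`` must
# be a positive number other than 1.0.
#
#   freshness_boost = FreshnessScoringFunction(
#       field_name="lastUpdated",
#       boost=1.5,
#       interpolation="quadratic",
#       parameters=FreshnessScoringParameters(
#           boosting_duration=datetime.timedelta(days=30),
#       ),
#   )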
class GetIndexStatisticsResult(msrest.serialization.Model):
"""Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar document_count: Required. The number of documents in the index.
:vartype document_count: long
:ivar storage_size: Required. The amount of storage in bytes consumed by the index.
:vartype storage_size: long
"""
_validation = {
'document_count': {'required': True, 'readonly': True},
'storage_size': {'required': True, 'readonly': True},
}
_attribute_map = {
'document_count': {'key': 'documentCount', 'type': 'long'},
'storage_size': {'key': 'storageSize', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(GetIndexStatisticsResult, self).__init__(**kwargs)
self.document_count = None
self.storage_size = None
class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy):
"""Defines a data change detection policy that captures changes based on the value of a high water mark column.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the data change detection
policy. Constant filled by server.
:vartype odata_type: str
:ivar high_water_mark_column_name: Required. The name of the high water mark column.
:vartype high_water_mark_column_name: str
"""
_validation = {
'odata_type': {'required': True},
'high_water_mark_column_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'high_water_mark_column_name': {'key': 'highWaterMarkColumnName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword high_water_mark_column_name: Required. The name of the high water mark column.
:paramtype high_water_mark_column_name: str
"""
super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' # type: str
self.high_water_mark_column_name = kwargs['high_water_mark_column_name']
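# Usage sketch (illustrative): change detection keyed off a monotonically increasing column.
# The "_ts" column name is a hypothetical example (a typical choice for Cosmos DB sources).
#
#   change_policy = HighWaterMarkChangeDetectionPolicy(
#       high_water_mark_column_name="_ts",
#   )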
class ImageAnalysisSkill(SearchIndexerSkill):
"""A skill that analyzes image files. It extracts a rich set of visual features based on the image content.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "en", "es", "ja", "pt", "zh".
:vartype default_language_code: str or
~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage
:ivar visual_features: A list of visual features.
:vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
:ivar details: A string indicating which domain-specific details to return.
:vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'visual_features': {'key': 'visualFeatures', 'type': '[str]'},
'details': {'key': 'details', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "en", "es", "ja", "pt", "zh".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage
:keyword visual_features: A list of visual features.
:paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
:keyword details: A string indicating which domain-specific details to return.
:paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
"""
super(ImageAnalysisSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.visual_features = kwargs.get('visual_features', None)
self.details = kwargs.get('details', None)
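# Usage sketch (illustrative): an image analysis skill requesting a few visual features. It
# assumes the InputFieldMappingEntry/OutputFieldMappingEntry models accept ``name``/``source``
# and ``name``/``target_name`` keywords; the feature, detail, and path values are hypothetical
# example enum values.
#
#   image_skill = ImageAnalysisSkill(
#       context="/document/normalized_images/*",
#       default_language_code="en",
#       visual_features=["tags", "description"],
#       details=["landmarks"],
#       inputs=[InputFieldMappingEntry(name="image", source="/document/normalized_images/*")],
#       outputs=[OutputFieldMappingEntry(name="tags", target_name="imageTags")],
#   )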
class IndexerCurrentState(msrest.serialization.Model):
"""Represents all of the state that defines and dictates the indexer's current execution.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar mode: The mode the indexer is running in. Possible values include: "indexingAllDocs",
"indexingResetDocs".
:vartype mode: str or ~azure.search.documents.indexes.models.IndexingMode
:ivar all_docs_initial_change_tracking_state: Change tracking state used when indexing starts
on all documents in the datasource.
:vartype all_docs_initial_change_tracking_state: str
:ivar all_docs_final_change_tracking_state: Change tracking state value when indexing finishes
on all documents in the datasource.
:vartype all_docs_final_change_tracking_state: str
:ivar reset_docs_initial_change_tracking_state: Change tracking state used when indexing starts
on select, reset documents in the datasource.
:vartype reset_docs_initial_change_tracking_state: str
:ivar reset_docs_final_change_tracking_state: Change tracking state value when indexing
finishes on select, reset documents in the datasource.
:vartype reset_docs_final_change_tracking_state: str
:ivar reset_document_keys: The list of document keys that have been reset. The document key is
the document's unique identifier for the data in the search index. The indexer will prioritize
selectively re-ingesting these keys.
:vartype reset_document_keys: list[str]
:ivar reset_datasource_document_ids: The list of datasource document ids that have been reset.
The datasource document id is the unique identifier for the data in the datasource. The indexer
will prioritize selectively re-ingesting these ids.
:vartype reset_datasource_document_ids: list[str]
"""
_validation = {
'mode': {'readonly': True},
'all_docs_initial_change_tracking_state': {'readonly': True},
'all_docs_final_change_tracking_state': {'readonly': True},
'reset_docs_initial_change_tracking_state': {'readonly': True},
'reset_docs_final_change_tracking_state': {'readonly': True},
'reset_document_keys': {'readonly': True},
'reset_datasource_document_ids': {'readonly': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'all_docs_initial_change_tracking_state': {'key': 'allDocsInitialChangeTrackingState', 'type': 'str'},
'all_docs_final_change_tracking_state': {'key': 'allDocsFinalChangeTrackingState', 'type': 'str'},
'reset_docs_initial_change_tracking_state': {'key': 'resetDocsInitialChangeTrackingState', 'type': 'str'},
'reset_docs_final_change_tracking_state': {'key': 'resetDocsFinalChangeTrackingState', 'type': 'str'},
'reset_document_keys': {'key': 'resetDocumentKeys', 'type': '[str]'},
'reset_datasource_document_ids': {'key': 'resetDatasourceDocumentIds', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IndexerCurrentState, self).__init__(**kwargs)
self.mode = None
self.all_docs_initial_change_tracking_state = None
self.all_docs_final_change_tracking_state = None
self.reset_docs_initial_change_tracking_state = None
self.reset_docs_final_change_tracking_state = None
self.reset_document_keys = None
self.reset_datasource_document_ids = None
class IndexerExecutionResult(msrest.serialization.Model):
"""Represents the result of an individual indexer execution.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar status: Required. The outcome of this indexer execution. Possible values include:
"transientFailure", "success", "inProgress", "reset".
:vartype status: str or ~azure.search.documents.indexes.models.IndexerExecutionStatus
:ivar status_detail: The outcome of this indexer execution. Possible values include:
"resetDocs".
:vartype status_detail: str or
~azure.search.documents.indexes.models.IndexerExecutionStatusDetail
:ivar current_state: All of the state that defines and dictates the indexer's current
execution.
:vartype current_state: ~azure.search.documents.indexes.models.IndexerCurrentState
:ivar error_message: The error message indicating the top-level error, if any.
:vartype error_message: str
:ivar start_time: The start time of this indexer execution.
:vartype start_time: ~datetime.datetime
:ivar end_time: The end time of this indexer execution, if the execution has already completed.
:vartype end_time: ~datetime.datetime
:ivar errors: Required. The item-level indexing errors.
:vartype errors: list[~azure.search.documents.indexes.models.SearchIndexerError]
:ivar warnings: Required. The item-level indexing warnings.
:vartype warnings: list[~azure.search.documents.indexes.models.SearchIndexerWarning]
:ivar item_count: Required. The number of items that were processed during this indexer
execution. This includes both successfully processed items and items where indexing was
attempted but failed.
:vartype item_count: int
:ivar failed_item_count: Required. The number of items that failed to be indexed during this
indexer execution.
:vartype failed_item_count: int
:ivar initial_tracking_state: Change tracking state with which an indexer execution started.
:vartype initial_tracking_state: str
:ivar final_tracking_state: Change tracking state with which an indexer execution finished.
:vartype final_tracking_state: str
"""
_validation = {
'status': {'required': True, 'readonly': True},
'status_detail': {'readonly': True},
'current_state': {'readonly': True},
'error_message': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'errors': {'required': True, 'readonly': True},
'warnings': {'required': True, 'readonly': True},
'item_count': {'required': True, 'readonly': True},
'failed_item_count': {'required': True, 'readonly': True},
'initial_tracking_state': {'readonly': True},
'final_tracking_state': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'status_detail': {'key': 'statusDetail', 'type': 'str'},
'current_state': {'key': 'currentState', 'type': 'IndexerCurrentState'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[SearchIndexerError]'},
'warnings': {'key': 'warnings', 'type': '[SearchIndexerWarning]'},
'item_count': {'key': 'itemsProcessed', 'type': 'int'},
'failed_item_count': {'key': 'itemsFailed', 'type': 'int'},
'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'},
'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IndexerExecutionResult, self).__init__(**kwargs)
self.status = None
self.status_detail = None
self.current_state = None
self.error_message = None
self.start_time = None
self.end_time = None
self.errors = None
self.warnings = None
self.item_count = None
self.failed_item_count = None
self.initial_tracking_state = None
self.final_tracking_state = None
class IndexingParameters(msrest.serialization.Model):
"""Represents parameters for indexer execution.
:ivar batch_size: The number of items that are read from the data source and indexed as a
single batch in order to improve performance. The default depends on the data source type.
:vartype batch_size: int
:ivar max_failed_items: The maximum number of items that can fail indexing for indexer
execution to still be considered successful. -1 means no limit. Default is 0.
:vartype max_failed_items: int
:ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail
indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
:vartype max_failed_items_per_batch: int
:ivar configuration: A dictionary of indexer-specific configuration properties. Each name is
the name of a specific property. Each value must be of a primitive type.
:vartype configuration: ~azure.search.documents.indexes.models.IndexingParametersConfiguration
"""
_attribute_map = {
'batch_size': {'key': 'batchSize', 'type': 'int'},
'max_failed_items': {'key': 'maxFailedItems', 'type': 'int'},
'max_failed_items_per_batch': {'key': 'maxFailedItemsPerBatch', 'type': 'int'},
'configuration': {'key': 'configuration', 'type': 'IndexingParametersConfiguration'},
}
def __init__(
self,
**kwargs
):
"""
:keyword batch_size: The number of items that are read from the data source and indexed as a
single batch in order to improve performance. The default depends on the data source type.
:paramtype batch_size: int
:keyword max_failed_items: The maximum number of items that can fail indexing for indexer
execution to still be considered successful. -1 means no limit. Default is 0.
:paramtype max_failed_items: int
:keyword max_failed_items_per_batch: The maximum number of items in a single batch that can
fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
:paramtype max_failed_items_per_batch: int
:keyword configuration: A dictionary of indexer-specific configuration properties. Each name is
the name of a specific property. Each value must be of a primitive type.
:paramtype configuration:
~azure.search.documents.indexes.models.IndexingParametersConfiguration
"""
super(IndexingParameters, self).__init__(**kwargs)
self.batch_size = kwargs.get('batch_size', None)
self.max_failed_items = kwargs.get('max_failed_items', 0)
self.max_failed_items_per_batch = kwargs.get('max_failed_items_per_batch', 0)
self.configuration = kwargs.get('configuration', None)
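# Illustrative usage sketch (not part of the generated model code): one way a
# caller might build an IndexingParameters payload. The numeric values below are
# hypothetical examples, not service defaults.
def _example_indexing_parameters():
    return IndexingParameters(
        batch_size=500,                 # read/index 500 items per batch
        max_failed_items=10,            # tolerate up to 10 failed items overall
        max_failed_items_per_batch=5,   # tolerate up to 5 failures per batch
    )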
class IndexingParametersConfiguration(msrest.serialization.Model):
"""A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, any]
:ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines".
Default value: "default".
:vartype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
:ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when
processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over
those files during indexing.
:vartype excluded_file_name_extensions: str
:ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when
processing from Azure blob storage. For example, you could focus indexing on specific
application files ".docx, .pptx, .msg" to specifically include those file types.
:vartype indexed_file_name_extensions: str
:ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue
indexing when an unsupported content type is encountered, and you don't know all the content
types (file extensions) in advance.
:vartype fail_on_unsupported_content_type: bool
:ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
indexing if a document fails indexing.
:vartype fail_on_unprocessable_document: bool
:ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property
to true to still index storage metadata for blob content that is too large to process.
Oversized blobs are treated as errors by default. For limits on blob size, see
https://docs.microsoft.com/azure/search/search-limits-quotas-capacity.
:vartype index_storage_metadata_only_for_oversized_documents: bool
:ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
headers, useful for mapping source fields to destination fields in an index.
:vartype delimited_text_headers: str
:ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
delimiter for CSV files where each line starts a new document (for example, "|").
:vartype delimited_text_delimiter: str
:ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of
each blob contains headers.
:vartype first_line_contains_headers: bool
:ivar document_root: For JSON arrays, given a structured or semi-structured document, you can
specify a path to the array using this property.
:vartype document_root: str
:ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the
indexer which data to extract from image content when "imageAction" is set to a value other
than "none". This applies to embedded image content in a .PDF or other application, or image
files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata",
"allMetadata", "contentAndMetadata". Default value: "contentAndMetadata".
:vartype data_to_extract: str or
~azure.search.documents.indexes.models.BlobIndexerDataToExtract
:ivar image_action: Determines how to process embedded images and image files in Azure blob
storage. Setting the "imageAction" configuration to any value other than "none" requires that
a skillset also be attached to that indexer. Possible values include: "none",
"generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none".
:vartype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
:ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that
is an object representing the original file data downloaded from your blob data source. This
allows you to pass the original file data to a custom skill for processing within the
enrichment pipeline, or to the Document Extraction skill.
:vartype allow_skillset_to_read_file_data: bool
:ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in
Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
:vartype pdf_text_rotation_algorithm: str or
~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm
:ivar execution_environment: Specifies the environment in which the indexer should execute.
Possible values include: "standard", "private". Default value: "standard".
:vartype execution_environment: str or
~azure.search.documents.indexes.models.IndexerExecutionEnvironment
:ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database
data sources, specified in the format "hh:mm:ss".
:vartype query_timeout: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'parsing_mode': {'key': 'parsingMode', 'type': 'str'},
'excluded_file_name_extensions': {'key': 'excludedFileNameExtensions', 'type': 'str'},
'indexed_file_name_extensions': {'key': 'indexedFileNameExtensions', 'type': 'str'},
'fail_on_unsupported_content_type': {'key': 'failOnUnsupportedContentType', 'type': 'bool'},
'fail_on_unprocessable_document': {'key': 'failOnUnprocessableDocument', 'type': 'bool'},
'index_storage_metadata_only_for_oversized_documents': {'key': 'indexStorageMetadataOnlyForOversizedDocuments', 'type': 'bool'},
'delimited_text_headers': {'key': 'delimitedTextHeaders', 'type': 'str'},
'delimited_text_delimiter': {'key': 'delimitedTextDelimiter', 'type': 'str'},
'first_line_contains_headers': {'key': 'firstLineContainsHeaders', 'type': 'bool'},
'document_root': {'key': 'documentRoot', 'type': 'str'},
'data_to_extract': {'key': 'dataToExtract', 'type': 'str'},
'image_action': {'key': 'imageAction', 'type': 'str'},
'allow_skillset_to_read_file_data': {'key': 'allowSkillsetToReadFileData', 'type': 'bool'},
'pdf_text_rotation_algorithm': {'key': 'pdfTextRotationAlgorithm', 'type': 'str'},
'execution_environment': {'key': 'executionEnvironment', 'type': 'str'},
'query_timeout': {'key': 'queryTimeout', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, any]
:keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines".
Default value: "default".
:paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
:keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore
when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip
over those files during indexing.
:paramtype excluded_file_name_extensions: str
:keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select
when processing from Azure blob storage. For example, you could focus indexing on specific
application files ".docx, .pptx, .msg" to specifically include those file types.
:paramtype indexed_file_name_extensions: str
:keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to
continue indexing when an unsupported content type is encountered, and you don't know all the
content types (file extensions) in advance.
:paramtype fail_on_unsupported_content_type: bool
:keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
indexing if a document fails indexing.
:paramtype fail_on_unprocessable_document: bool
:keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this
property to true to still index storage metadata for blob content that is too large to process.
Oversized blobs are treated as errors by default. For limits on blob size, see
https://docs.microsoft.com/azure/search/search-limits-quotas-capacity.
:paramtype index_storage_metadata_only_for_oversized_documents: bool
:keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
headers, useful for mapping source fields to destination fields in an index.
:paramtype delimited_text_headers: str
:keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
delimiter for CSV files where each line starts a new document (for example, "|").
:paramtype delimited_text_delimiter: str
:keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line
of each blob contains headers.
:paramtype first_line_contains_headers: bool
:keyword document_root: For JSON arrays, given a structured or semi-structured document, you
can specify a path to the array using this property.
:paramtype document_root: str
:keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the
indexer which data to extract from image content when "imageAction" is set to a value other
than "none". This applies to embedded image content in a .PDF or other application, or image
files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata",
"allMetadata", "contentAndMetadata". Default value: "contentAndMetadata".
:paramtype data_to_extract: str or
~azure.search.documents.indexes.models.BlobIndexerDataToExtract
:keyword image_action: Determines how to process embedded images and image files in Azure blob
storage. Setting the "imageAction" configuration to any value other than "none" requires that
a skillset also be attached to that indexer. Possible values include: "none",
"generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none".
:paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
:keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data
that is an object representing the original file data downloaded from your blob data source.
This allows you to pass the original file data to a custom skill for processing within the
enrichment pipeline, or to the Document Extraction skill.
:paramtype allow_skillset_to_read_file_data: bool
:keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files
in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
:paramtype pdf_text_rotation_algorithm: str or
~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm
:keyword execution_environment: Specifies the environment in which the indexer should execute.
Possible values include: "standard", "private". Default value: "standard".
:paramtype execution_environment: str or
~azure.search.documents.indexes.models.IndexerExecutionEnvironment
:keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL
database data sources, specified in the format "hh:mm:ss".
:paramtype query_timeout: str
"""
super(IndexingParametersConfiguration, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.parsing_mode = kwargs.get('parsing_mode', "default")
self.excluded_file_name_extensions = kwargs.get('excluded_file_name_extensions', "")
self.indexed_file_name_extensions = kwargs.get('indexed_file_name_extensions', "")
self.fail_on_unsupported_content_type = kwargs.get('fail_on_unsupported_content_type', False)
self.fail_on_unprocessable_document = kwargs.get('fail_on_unprocessable_document', False)
self.index_storage_metadata_only_for_oversized_documents = kwargs.get('index_storage_metadata_only_for_oversized_documents', False)
self.delimited_text_headers = kwargs.get('delimited_text_headers', None)
self.delimited_text_delimiter = kwargs.get('delimited_text_delimiter', None)
self.first_line_contains_headers = kwargs.get('first_line_contains_headers', True)
self.document_root = kwargs.get('document_root', None)
self.data_to_extract = kwargs.get('data_to_extract', "contentAndMetadata")
self.image_action = kwargs.get('image_action', "none")
self.allow_skillset_to_read_file_data = kwargs.get('allow_skillset_to_read_file_data', False)
self.pdf_text_rotation_algorithm = kwargs.get('pdf_text_rotation_algorithm', "none")
self.execution_environment = kwargs.get('execution_environment', "standard")
self.query_timeout = kwargs.get('query_timeout', "00:05:00")
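# Illustrative usage sketch: a blob-oriented IndexingParametersConfiguration that
# could be attached to IndexingParameters via its ``configuration`` property. The
# chosen values come from the documented "Possible values" lists above and are
# examples only.
def _example_blob_indexing_configuration():
    return IndexingParametersConfiguration(
        parsing_mode="json",
        indexed_file_name_extensions=".json",
        data_to_extract="contentAndMetadata",
        fail_on_unsupported_content_type=False,
    )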
class IndexingSchedule(msrest.serialization.Model):
"""Represents a schedule for indexer execution.
All required parameters must be populated in order to send to Azure.
:ivar interval: Required. The interval of time between indexer executions.
:vartype interval: ~datetime.timedelta
:ivar start_time: The time when an indexer should start running.
:vartype start_time: ~datetime.datetime
"""
_validation = {
'interval': {'required': True},
}
_attribute_map = {
'interval': {'key': 'interval', 'type': 'duration'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
"""
:keyword interval: Required. The interval of time between indexer executions.
:paramtype interval: ~datetime.timedelta
:keyword start_time: The time when an indexer should start running.
:paramtype start_time: ~datetime.datetime
"""
super(IndexingSchedule, self).__init__(**kwargs)
self.interval = kwargs['interval']
self.start_time = kwargs.get('start_time', None)
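# Illustrative usage sketch: an IndexingSchedule that runs hourly from a
# hypothetical fixed start time. ``interval`` is required and is a
# datetime.timedelta.
def _example_indexing_schedule():
    import datetime
    return IndexingSchedule(
        interval=datetime.timedelta(hours=1),
        start_time=datetime.datetime(2022, 1, 1, 0, 0, 0),
    )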
class InputFieldMappingEntry(msrest.serialization.Model):
"""Input field mapping for a skill.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the input.
:vartype name: str
:ivar source: The source of the input.
:vartype source: str
:ivar source_context: The source context used for selecting recursive inputs.
:vartype source_context: str
:ivar inputs: The recursive inputs used when creating a complex type.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'source_context': {'key': 'sourceContext', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the input.
:paramtype name: str
:keyword source: The source of the input.
:paramtype source: str
:keyword source_context: The source context used for selecting recursive inputs.
:paramtype source_context: str
:keyword inputs: The recursive inputs used when creating a complex type.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
super(InputFieldMappingEntry, self).__init__(**kwargs)
self.name = kwargs['name']
self.source = kwargs.get('source', None)
self.source_context = kwargs.get('source_context', None)
self.inputs = kwargs.get('inputs', None)
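# Illustrative usage sketch: a complex input assembled from recursive inputs via
# ``source_context`` and ``inputs``, as described above. The field paths are
# hypothetical.
def _example_input_field_mapping():
    return InputFieldMappingEntry(
        name="fullName",
        source_context="/document",
        inputs=[
            InputFieldMappingEntry(name="first", source="/document/firstName"),
            InputFieldMappingEntry(name="last", source="/document/lastName"),
        ],
    )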
class KeepTokenFilter(TokenFilter):
"""A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar keep_words: Required. The list of words to keep.
:vartype keep_words: list[str]
:ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default
is false.
:vartype lower_case_keep_words: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'keep_words': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'keep_words': {'key': 'keepWords', 'type': '[str]'},
'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword keep_words: Required. The list of words to keep.
:paramtype keep_words: list[str]
:keyword lower_case_keep_words: A value indicating whether to lower case all words first.
Default is false.
:paramtype lower_case_keep_words: bool
"""
super(KeepTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' # type: str
self.keep_words = kwargs['keep_words']
self.lower_case_keep_words = kwargs.get('lower_case_keep_words', False)
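# Illustrative usage sketch: a KeepTokenFilter that keeps only a small allow-list
# of words, lower-casing input first. The name and word list are hypothetical.
def _example_keep_token_filter():
    return KeepTokenFilter(
        name="keep_brand_terms",
        keep_words=["azure", "search", "cognitive"],
        lower_case_keep_words=True,
    )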
class KeyPhraseExtractionSkill(SearchIndexerSkill):
"""A skill that uses text analytics for key phrase extraction.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl",
"pt-PT", "pt-BR", "ru", "es", "sv".
:vartype default_language_code: str or
~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage
:ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all
identified key phrases will be returned.
:vartype max_key_phrase_count: int
:ivar model_version: The version of the model to use when calling the Text Analytics service.
It will default to the latest available when not specified. We recommend you do not specify
this value unless absolutely necessary.
:vartype model_version: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'max_key_phrase_count': {'key': 'maxKeyPhraseCount', 'type': 'int'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl",
"pt-PT", "pt-BR", "ru", "es", "sv".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage
:keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent,
all identified key phrases will be returned.
:paramtype max_key_phrase_count: int
:keyword model_version: The version of the model to use when calling the Text Analytics
service. It will default to the latest available when not specified. We recommend you do not
specify this value unless absolutely necessary.
:paramtype model_version: str
"""
super(KeyPhraseExtractionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.max_key_phrase_count = kwargs.get('max_key_phrase_count', None)
self.model_version = kwargs.get('model_version', None)
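# Illustrative usage sketch: a KeyPhraseExtractionSkill wired to read document
# content and emit key phrases. OutputFieldMappingEntry is defined elsewhere in
# this module; the source and target paths are hypothetical.
def _example_key_phrase_skill():
    return KeyPhraseExtractionSkill(
        description="Extract key phrases from the document body",
        context="/document",
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="keyPhrases", target_name="key_phrases")],
        default_language_code="en",
        max_key_phrase_count=10,
    )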
class KeywordMarkerTokenFilter(TokenFilter):
"""Marks terms as keywords. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar keywords: Required. A list of words to mark as keywords.
:vartype keywords: list[str]
:ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted
to lower case first. Default is false.
:vartype ignore_case: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'keywords': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'keywords': {'key': 'keywords', 'type': '[str]'},
'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword keywords: Required. A list of words to mark as keywords.
:paramtype keywords: list[str]
:keyword ignore_case: A value indicating whether to ignore case. If true, all words are
converted to lower case first. Default is false.
:paramtype ignore_case: bool
"""
super(KeywordMarkerTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' # type: str
self.keywords = kwargs['keywords']
self.ignore_case = kwargs.get('ignore_case', False)
class KeywordTokenizer(LexicalTokenizer):
"""Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar buffer_size: The read buffer size in bytes. Default is 256.
:vartype buffer_size: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'buffer_size': {'key': 'bufferSize', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword buffer_size: The read buffer size in bytes. Default is 256.
:paramtype buffer_size: int
"""
super(KeywordTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' # type: str
self.buffer_size = kwargs.get('buffer_size', 256)
class KeywordTokenizerV2(LexicalTokenizer):
"""Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Default is 256. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:vartype max_token_length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:paramtype max_token_length: int
"""
super(KeywordTokenizerV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' # type: str
self.max_token_length = kwargs.get('max_token_length', 256)
class LanguageDetectionSkill(SearchIndexerSkill):
"""A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_country_hint: A country code to use as a hint to the language detection model if
it cannot disambiguate the language.
:vartype default_country_hint: str
:ivar model_version: The version of the model to use when calling the Text Analytics service.
It will default to the latest available when not specified. We recommend you do not specify
this value unless absolutely necessary.
:vartype model_version: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_country_hint': {'key': 'defaultCountryHint', 'type': 'str'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_country_hint: A country code to use as a hint to the language detection model
if it cannot disambiguate the language.
:paramtype default_country_hint: str
:keyword model_version: The version of the model to use when calling the Text Analytics
service. It will default to the latest available when not specified. We recommend you do not
specify this value unless absolutely necessary.
:paramtype model_version: str
"""
super(LanguageDetectionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' # type: str
self.default_country_hint = kwargs.get('default_country_hint', None)
self.model_version = kwargs.get('model_version', None)
class LengthTokenFilter(TokenFilter):
"""Removes words that are too long or too short. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less
than the value of max.
:vartype min_length: int
:ivar max_length: The maximum length in characters. Default and maximum is 300.
:vartype max_length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'min_length': {'maximum': 300},
'max_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_length': {'key': 'min', 'type': 'int'},
'max_length': {'key': 'max', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be
less than the value of max.
:paramtype min_length: int
:keyword max_length: The maximum length in characters. Default and maximum is 300.
:paramtype max_length: int
"""
super(LengthTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' # type: str
self.min_length = kwargs.get('min_length', 0)
self.max_length = kwargs.get('max_length', 300)
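# Illustrative usage sketch: a LengthTokenFilter that drops tokens shorter than 3
# or longer than 25 characters (both within the documented 0-300 bounds). The
# filter name is hypothetical.
def _example_length_token_filter():
    return LengthTokenFilter(
        name="drop_extreme_lengths",
        min_length=3,
        max_length=25,
    )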
class LimitTokenFilter(TokenFilter):
"""Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar max_token_count: The maximum number of tokens to produce. Default is 1.
:vartype max_token_count: int
:ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed
even if maxTokenCount is reached. Default is false.
:vartype consume_all_tokens: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_count': {'key': 'maxTokenCount', 'type': 'int'},
'consume_all_tokens': {'key': 'consumeAllTokens', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_count: The maximum number of tokens to produce. Default is 1.
:paramtype max_token_count: int
:keyword consume_all_tokens: A value indicating whether all tokens from the input must be
consumed even if maxTokenCount is reached. Default is false.
:paramtype consume_all_tokens: bool
"""
super(LimitTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' # type: str
self.max_token_count = kwargs.get('max_token_count', 1)
self.consume_all_tokens = kwargs.get('consume_all_tokens', False)
class ListDataSourcesResult(msrest.serialization.Model):
"""Response from a List Datasources request. If successful, it includes the full definitions of all datasources.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar data_sources: Required. The datasources in the Search service.
:vartype data_sources: list[~azure.search.documents.indexes.models.SearchIndexerDataSource]
"""
_validation = {
'data_sources': {'required': True, 'readonly': True},
}
_attribute_map = {
'data_sources': {'key': 'value', 'type': '[SearchIndexerDataSource]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ListDataSourcesResult, self).__init__(**kwargs)
self.data_sources = None
class ListIndexersResult(msrest.serialization.Model):
"""Response from a List Indexers request. If successful, it includes the full definitions of all indexers.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar indexers: Required. The indexers in the Search service.
:vartype indexers: list[~azure.search.documents.indexes.models.SearchIndexer]
"""
_validation = {
'indexers': {'required': True, 'readonly': True},
}
_attribute_map = {
'indexers': {'key': 'value', 'type': '[SearchIndexer]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ListIndexersResult, self).__init__(**kwargs)
self.indexers = None
class ListIndexesResult(msrest.serialization.Model):
"""Response from a List Indexes request. If successful, it includes the full definitions of all indexes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar indexes: Required. The indexes in the Search service.
:vartype indexes: list[~azure.search.documents.indexes.models.SearchIndex]
"""
_validation = {
'indexes': {'required': True, 'readonly': True},
}
_attribute_map = {
'indexes': {'key': 'value', 'type': '[SearchIndex]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ListIndexesResult, self).__init__(**kwargs)
self.indexes = None
class ListSkillsetsResult(msrest.serialization.Model):
"""Response from a list skillset request. If successful, it includes the full definitions of all skillsets.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar skillsets: Required. The skillsets defined in the Search service.
:vartype skillsets: list[~azure.search.documents.indexes.models.SearchIndexerSkillset]
"""
_validation = {
'skillsets': {'required': True, 'readonly': True},
}
_attribute_map = {
'skillsets': {'key': 'value', 'type': '[SearchIndexerSkillset]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ListSkillsetsResult, self).__init__(**kwargs)
self.skillsets = None
class ListSynonymMapsResult(msrest.serialization.Model):
"""Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar synonym_maps: Required. The synonym maps in the Search service.
:vartype synonym_maps: list[~azure.search.documents.indexes.models.SynonymMap]
"""
_validation = {
'synonym_maps': {'required': True, 'readonly': True},
}
_attribute_map = {
'synonym_maps': {'key': 'value', 'type': '[SynonymMap]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ListSynonymMapsResult, self).__init__(**kwargs)
self.synonym_maps = None
class LuceneStandardAnalyzer(LexicalAnalyzer):
"""Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the analyzer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:vartype max_token_length: int
:ivar stopwords: A list of stopwords.
:vartype stopwords: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
'stopwords': {'key': 'stopwords', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the analyzer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:paramtype max_token_length: int
:keyword stopwords: A list of stopwords.
:paramtype stopwords: list[str]
"""
super(LuceneStandardAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
self.stopwords = kwargs.get('stopwords', None)
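# Illustrative usage sketch: a custom-named standard analyzer with an explicit
# stopword list. The analyzer name and stopwords are hypothetical.
def _example_standard_analyzer():
    return LuceneStandardAnalyzer(
        name="my_standard_analyzer",
        max_token_length=255,
        stopwords=["the", "and", "of"],
    )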
class LuceneStandardTokenizer(LexicalTokenizer):
"""Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split.
:vartype max_token_length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split.
:paramtype max_token_length: int
"""
super(LuceneStandardTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
class LuceneStandardTokenizerV2(LexicalTokenizer):
"""Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:vartype max_token_length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:paramtype max_token_length: int
"""
super(LuceneStandardTokenizerV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
class MagnitudeScoringFunction(ScoringFunction):
"""Defines a function that boosts scores based on the magnitude of a numeric field.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case. Constant filled by server.
:vartype type: str
:ivar field_name: Required. The name of the field used as input to the scoring function.
:vartype field_name: str
:ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
1.0.
:vartype boost: float
:ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:ivar parameters: Required. Parameter values for the magnitude scoring function.
:vartype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
"""
_validation = {
'type': {'required': True},
'field_name': {'required': True},
'boost': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'field_name': {'key': 'fieldName', 'type': 'str'},
'boost': {'key': 'boost', 'type': 'float'},
'interpolation': {'key': 'interpolation', 'type': 'str'},
'parameters': {'key': 'magnitude', 'type': 'MagnitudeScoringParameters'},
}
def __init__(
self,
**kwargs
):
"""
:keyword field_name: Required. The name of the field used as input to the scoring function.
:paramtype field_name: str
:keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
to 1.0.
:paramtype boost: float
:keyword interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:paramtype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:keyword parameters: Required. Parameter values for the magnitude scoring function.
:paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
"""
super(MagnitudeScoringFunction, self).__init__(**kwargs)
self.type = 'magnitude' # type: str
self.parameters = kwargs['parameters']
class MagnitudeScoringParameters(msrest.serialization.Model):
"""Provides parameter values to a magnitude scoring function.
All required parameters must be populated in order to send to Azure.
:ivar boosting_range_start: Required. The field value at which boosting starts.
:vartype boosting_range_start: float
:ivar boosting_range_end: Required. The field value at which boosting ends.
:vartype boosting_range_end: float
:ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
boost for field values beyond the range end value; default is false.
:vartype should_boost_beyond_range_by_constant: bool
"""
_validation = {
'boosting_range_start': {'required': True},
'boosting_range_end': {'required': True},
}
_attribute_map = {
'boosting_range_start': {'key': 'boostingRangeStart', 'type': 'float'},
'boosting_range_end': {'key': 'boostingRangeEnd', 'type': 'float'},
'should_boost_beyond_range_by_constant': {'key': 'constantBoostBeyondRange', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword boosting_range_start: Required. The field value at which boosting starts.
:paramtype boosting_range_start: float
:keyword boosting_range_end: Required. The field value at which boosting ends.
:paramtype boosting_range_end: float
:keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
boost for field values beyond the range end value; default is false.
:paramtype should_boost_beyond_range_by_constant: bool
"""
super(MagnitudeScoringParameters, self).__init__(**kwargs)
self.boosting_range_start = kwargs['boosting_range_start']
self.boosting_range_end = kwargs['boosting_range_end']
self.should_boost_beyond_range_by_constant = kwargs.get('should_boost_beyond_range_by_constant', None)
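# Illustrative usage sketch: a magnitude scoring function that doubles the score
# of documents whose (hypothetical) "rating" field falls between 3.0 and 5.0,
# using the MagnitudeScoringParameters model defined just above.
def _example_magnitude_scoring_function():
    return MagnitudeScoringFunction(
        field_name="rating",
        boost=2.0,
        interpolation="linear",
        parameters=MagnitudeScoringParameters(
            boosting_range_start=3.0,
            boosting_range_end=5.0,
            should_boost_beyond_range_by_constant=True,
        ),
    )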
class MappingCharFilter(CharFilter):
"""A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the char filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences
of the character "a" will be replaced with character "b").
:vartype mappings: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'mappings': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'mappings': {'key': 'mappings', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword mappings: Required. A list of mappings of the following format: "a=>b" (all
occurrences of the character "a" will be replaced with character "b").
:paramtype mappings: list[str]
"""
super(MappingCharFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' # type: str
self.mappings = kwargs['mappings']
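# Illustrative usage sketch: a MappingCharFilter using the "a=>b" mapping format
# described above. The filter name and mappings are hypothetical.
def _example_mapping_char_filter():
    return MappingCharFilter(
        name="normalize_symbols",
        mappings=["&=>and", "-=>_"],
    )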
class MergeSkill(SearchIndexerSkill):
"""A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an
empty space.
:vartype insert_pre_tag: str
:ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an
empty space.
:vartype insert_post_tag: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'insert_pre_tag': {'key': 'insertPreTag', 'type': 'str'},
'insert_post_tag': {'key': 'insertPostTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is
an empty space.
:paramtype insert_pre_tag: str
:keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is
an empty space.
:paramtype insert_post_tag: str
"""
super(MergeSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.MergeSkill' # type: str
self.insert_pre_tag = kwargs.get('insert_pre_tag', " ")
self.insert_post_tag = kwargs.get('insert_post_tag', " ")
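# Illustrative usage sketch: a MergeSkill that merges extracted image text back
# into the document content, relying on the default single-space pre/post tags.
# The source paths are hypothetical, and OutputFieldMappingEntry is defined
# elsewhere in this module.
def _example_merge_skill():
    return MergeSkill(
        context="/document",
        inputs=[
            InputFieldMappingEntry(name="text", source="/document/content"),
            InputFieldMappingEntry(name="itemsToInsert", source="/document/normalized_images/*/text"),
            InputFieldMappingEntry(name="offsets", source="/document/normalized_images/*/contentOffset"),
        ],
        outputs=[OutputFieldMappingEntry(name="mergedText", target_name="merged_text")],
    )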
class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer):
"""Divides text using language-specific rules and reduces words to their base forms.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
:vartype max_token_length: int
:ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as
the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
:vartype is_search_tokenizer: bool
:ivar language: The language to use. The default is English. Possible values include: "arabic",
"bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
"estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian",
"icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam",
"marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
"romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish",
"swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu".
:vartype language: str or
~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
'is_search_tokenizer': {'key': 'isSearchTokenizer', 'type': 'bool'},
'language': {'key': 'language', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
:paramtype max_token_length: int
:keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
:paramtype is_search_tokenizer: bool
:keyword language: The language to use. The default is English. Possible values include:
"arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
"estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian",
"icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam",
"marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
"romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish",
"swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu".
:paramtype language: str or
~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage
"""
super(MicrosoftLanguageStemmingTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
self.is_search_tokenizer = kwargs.get('is_search_tokenizer', False)
self.language = kwargs.get('language', None)
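# Illustrative sketch: a custom tokenizer entry for an index definition, keeping the
# documented defaults except for an explicit language. The name and language values are
# examples only; the helper is not called anywhere.
def _example_stemming_tokenizer():
    return MicrosoftLanguageStemmingTokenizer(
        name="my-en-stemming-tokenizer",
        max_token_length=255,        # default; tokens over 300 chars are pre-split first
        is_search_tokenizer=False,   # use as the indexing tokenizer
        language="english",
    )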
class MicrosoftLanguageTokenizer(LexicalTokenizer):
"""Divides text using language-specific rules.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
:vartype max_token_length: int
:ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as
the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
:vartype is_search_tokenizer: bool
:ivar language: The language to use. The default is English. Possible values include: "bangla",
"bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech",
"danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic",
"indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi",
"norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian",
"russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil",
"telugu", "thai", "ukrainian", "urdu", "vietnamese".
:vartype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
'is_search_tokenizer': {'key': 'isSearchTokenizer', 'type': 'bool'},
'language': {'key': 'language', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
:paramtype max_token_length: int
:keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
:paramtype is_search_tokenizer: bool
:keyword language: The language to use. The default is English. Possible values include:
"bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian",
"czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi",
"icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam",
"marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
"romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish",
"tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese".
:paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
"""
super(MicrosoftLanguageTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
self.is_search_tokenizer = kwargs.get('is_search_tokenizer', False)
self.language = kwargs.get('language', None)
class NGramTokenFilter(TokenFilter):
"""Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
:vartype min_gram: int
:ivar max_gram: The maximum n-gram length. Default is 2.
:vartype max_gram: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_gram': {'key': 'minGram', 'type': 'int'},
'max_gram': {'key': 'maxGram', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
:paramtype min_gram: int
:keyword max_gram: The maximum n-gram length. Default is 2.
:paramtype max_gram: int
"""
super(NGramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' # type: str
self.min_gram = kwargs.get('min_gram', 1)
self.max_gram = kwargs.get('max_gram', 2)
class NGramTokenFilterV2(TokenFilter):
"""Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
value of maxGram.
:vartype min_gram: int
:ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:vartype max_gram: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'min_gram': {'maximum': 300},
'max_gram': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_gram': {'key': 'minGram', 'type': 'int'},
'max_gram': {'key': 'maxGram', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
the value of maxGram.
:paramtype min_gram: int
:keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:paramtype max_gram: int
"""
super(NGramTokenFilterV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' # type: str
self.min_gram = kwargs.get('min_gram', 1)
self.max_gram = kwargs.get('max_gram', 2)
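# Illustrative sketch: an n-gram token filter with explicit bounds. Per the validation
# above, min_gram and max_gram may not exceed 300, and min_gram must be less than
# max_gram. The name and sizes are example values.
def _example_ngram_token_filter_v2():
    return NGramTokenFilterV2(name="my-ngram-v2", min_gram=2, max_gram=4)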
class NGramTokenizer(LexicalTokenizer):
"""Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
value of maxGram.
:vartype min_gram: int
:ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:vartype max_gram: int
:ivar token_chars: Character classes to keep in the tokens.
:vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'min_gram': {'maximum': 300},
'max_gram': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'min_gram': {'key': 'minGram', 'type': 'int'},
'max_gram': {'key': 'maxGram', 'type': 'int'},
'token_chars': {'key': 'tokenChars', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
the value of maxGram.
:paramtype min_gram: int
:keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
:paramtype max_gram: int
:keyword token_chars: Character classes to keep in the tokens.
:paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
super(NGramTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' # type: str
self.min_gram = kwargs.get('min_gram', 1)
self.max_gram = kwargs.get('max_gram', 2)
self.token_chars = kwargs.get('token_chars', None)
class OcrSkill(SearchIndexerSkill):
"""A skill that extracts text from image files.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el",
"hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl",
"sr-Latn", "sk", "unk".
:vartype default_language_code: str or ~azure.search.documents.indexes.models.OcrSkillLanguage
:ivar should_detect_orientation: A value indicating whether to turn on orientation detection.
Default is false.
:vartype should_detect_orientation: bool
:ivar line_ending: Defines the sequence of characters to use between the lines of text
recognized by the OCR skill. The default value is "space". Possible values include: "space",
"carriageReturn", "lineFeed", "carriageReturnLineFeed".
:vartype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'should_detect_orientation': {'key': 'detectOrientation', 'type': 'bool'},
'line_ending': {'key': 'lineEnding', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el",
"hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl",
"sr-Latn", "sk", "unk".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.OcrSkillLanguage
:keyword should_detect_orientation: A value indicating whether to turn on orientation detection.
Default is false.
:paramtype should_detect_orientation: bool
:keyword line_ending: Defines the sequence of characters to use between the lines of text
recognized by the OCR skill. The default value is "space". Possible values include: "space",
"carriageReturn", "lineFeed", "carriageReturnLineFeed".
:paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
"""
super(OcrSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.should_detect_orientation = kwargs.get('should_detect_orientation', False)
self.line_ending = kwargs.get('line_ending', None)
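# Illustrative sketch: an OCR skill wired to the normalized images produced by an indexer,
# writing its "text" output to a target field. The context, source paths and target name
# are conventional example values, not values taken from this module.
def _example_ocr_skill():
    return OcrSkill(
        name="#ocr",
        context="/document/normalized_images/*",
        inputs=[InputFieldMappingEntry(name="image", source="/document/normalized_images/*")],
        outputs=[OutputFieldMappingEntry(name="text", target_name="extracted_text")],
        default_language_code="en",
        should_detect_orientation=True,
        line_ending="space",
    )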
class OutputFieldMappingEntry(msrest.serialization.Model):
"""Output field mapping for a skill.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the output defined by the skill.
:vartype name: str
:ivar target_name: The target name of the output. It is optional and defaults to the name.
:vartype target_name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'target_name': {'key': 'targetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the output defined by the skill.
:paramtype name: str
:keyword target_name: The target name of the output. It is optional and defaults to the name.
:paramtype target_name: str
"""
super(OutputFieldMappingEntry, self).__init__(**kwargs)
self.name = kwargs['name']
self.target_name = kwargs.get('target_name', None)
class PathHierarchyTokenizerV2(LexicalTokenizer):
"""Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar delimiter: The delimiter character to use. Default is "/".
:vartype delimiter: str
:ivar replacement: A value that, if set, replaces the delimiter character. Default is "/".
:vartype replacement: str
:ivar max_token_length: The maximum token length. Default and maximum is 300.
:vartype max_token_length: int
:ivar reverse_token_order: A value indicating whether to generate tokens in reverse order.
Default is false.
:vartype reverse_token_order: bool
:ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
:vartype number_of_tokens_to_skip: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'delimiter': {'key': 'delimiter', 'type': 'str'},
'replacement': {'key': 'replacement', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
'reverse_token_order': {'key': 'reverse', 'type': 'bool'},
'number_of_tokens_to_skip': {'key': 'skip', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword delimiter: The delimiter character to use. Default is "/".
:paramtype delimiter: str
:keyword replacement: A value that, if set, replaces the delimiter character. Default is "/".
:paramtype replacement: str
:keyword max_token_length: The maximum token length. Default and maximum is 300.
:paramtype max_token_length: int
:keyword reverse_token_order: A value indicating whether to generate tokens in reverse order.
Default is false.
:paramtype reverse_token_order: bool
:keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
:paramtype number_of_tokens_to_skip: int
"""
super(PathHierarchyTokenizerV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' # type: str
self.delimiter = kwargs.get('delimiter', "/")
self.replacement = kwargs.get('replacement', "/")
self.max_token_length = kwargs.get('max_token_length', 300)
self.reverse_token_order = kwargs.get('reverse_token_order', False)
self.number_of_tokens_to_skip = kwargs.get('number_of_tokens_to_skip', 0)
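# Illustrative sketch: tokenizing URL-like paths so that "/a/b/c" yields progressively
# longer prefixes ("/a", "/a/b", "/a/b/c"). The name is an example; the other values
# restate the documented defaults.
def _example_path_hierarchy_tokenizer():
    return PathHierarchyTokenizerV2(
        name="my-path-tokenizer",
        delimiter="/",
        replacement="/",
        reverse_token_order=False,
        number_of_tokens_to_skip=0,
    )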
class PatternAnalyzer(LexicalAnalyzer):
"""Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the analyzer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
:vartype lower_case_terms: bool
:ivar pattern: A regular expression pattern to match token separators. Default is an expression
that matches one or more non-word characters.
:vartype pattern: str
:ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE",
"COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
:vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags
:ivar stopwords: A list of stopwords.
:vartype stopwords: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'lower_case_terms': {'key': 'lowercase', 'type': 'bool'},
'pattern': {'key': 'pattern', 'type': 'str'},
'flags': {'key': 'flags', 'type': 'str'},
'stopwords': {'key': 'stopwords', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the analyzer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
:paramtype lower_case_terms: bool
:keyword pattern: A regular expression pattern to match token separators. Default is an
expression that matches one or more non-word characters.
:paramtype pattern: str
:keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
"CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
:paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
:keyword stopwords: A list of stopwords.
:paramtype stopwords: list[str]
"""
super(PatternAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' # type: str
self.lower_case_terms = kwargs.get('lower_case_terms', True)
self.pattern = kwargs.get('pattern', r"\W+")
self.flags = kwargs.get('flags', None)
self.stopwords = kwargs.get('stopwords', None)
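# Illustrative sketch: a pattern analyzer that splits on commas, lower-cases terms, and
# drops a small stopword list. The flag value comes from the RegexFlags choices listed
# above; the name, pattern, and stopwords are example values.
def _example_pattern_analyzer():
    return PatternAnalyzer(
        name="comma-separated",
        lower_case_terms=True,
        pattern=r",\s*",
        flags="CASE_INSENSITIVE",
        stopwords=["the", "and"],
    )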
class PatternCaptureTokenFilter(TokenFilter):
"""Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar patterns: Required. A list of patterns to match against each token.
:vartype patterns: list[str]
:ivar preserve_original: A value indicating whether to return the original token even if one of
the patterns matches. Default is true.
:vartype preserve_original: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'patterns': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'patterns': {'key': 'patterns', 'type': '[str]'},
'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword patterns: Required. A list of patterns to match against each token.
:paramtype patterns: list[str]
:keyword preserve_original: A value indicating whether to return the original token even if one
of the patterns matches. Default is true.
:paramtype preserve_original: bool
"""
super(PatternCaptureTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' # type: str
self.patterns = kwargs['patterns']
self.preserve_original = kwargs.get('preserve_original', True)
class PatternReplaceCharFilter(CharFilter):
"""A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the char filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar pattern: Required. A regular expression pattern.
:vartype pattern: str
:ivar replacement: Required. The replacement text.
:vartype replacement: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'pattern': {'required': True},
'replacement': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'pattern': {'key': 'pattern', 'type': 'str'},
'replacement': {'key': 'replacement', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword pattern: Required. A regular expression pattern.
:paramtype pattern: str
:keyword replacement: Required. The replacement text.
:paramtype replacement: str
"""
super(PatternReplaceCharFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' # type: str
self.pattern = kwargs['pattern']
self.replacement = kwargs['replacement']
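# Illustrative sketch: the char filter example from the docstring above, turning
# "aa bb aa bb" into "aa#bb aa#bb" before tokenization. Only the name is an invented
# example value.
def _example_pattern_replace_char_filter():
    return PatternReplaceCharFilter(
        name="collapse-pairs",
        pattern=r"(aa)\s+(bb)",
        replacement="$1#$2",
    )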
class PatternReplaceTokenFilter(TokenFilter):
"""A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar pattern: Required. A regular expression pattern.
:vartype pattern: str
:ivar replacement: Required. The replacement text.
:vartype replacement: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'pattern': {'required': True},
'replacement': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'pattern': {'key': 'pattern', 'type': 'str'},
'replacement': {'key': 'replacement', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword pattern: Required. A regular expression pattern.
:paramtype pattern: str
:keyword replacement: Required. The replacement text.
:paramtype replacement: str
"""
super(PatternReplaceTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' # type: str
self.pattern = kwargs['pattern']
self.replacement = kwargs['replacement']
class PatternTokenizer(LexicalTokenizer):
"""Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar pattern: A regular expression pattern to match token separators. Default is an expression
that matches one or more non-word characters.
:vartype pattern: str
:ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE",
"COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
:vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags
:ivar group: The zero-based ordinal of the matching group in the regular expression pattern to
extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
:vartype group: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'pattern': {'key': 'pattern', 'type': 'str'},
'flags': {'key': 'flags', 'type': 'str'},
'group': {'key': 'group', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword pattern: A regular expression pattern to match token separators. Default is an
expression that matches one or more non-word characters.
:paramtype pattern: str
:keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
"CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
:paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
:keyword group: The zero-based ordinal of the matching group in the regular expression pattern
to extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
:paramtype group: int
"""
super(PatternTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' # type: str
self.pattern = kwargs.get('pattern', r"\W+")
self.flags = kwargs.get('flags', None)
self.group = kwargs.get('group', -1)
class PhoneticTokenFilter(TokenFilter):
"""Create tokens for phonetic matches. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
"metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
"cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
:vartype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
:ivar replace_original_tokens: A value indicating whether encoded tokens should replace
original tokens. If false, encoded tokens are added as synonyms. Default is true.
:vartype replace_original_tokens: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'encoder': {'key': 'encoder', 'type': 'str'},
'replace_original_tokens': {'key': 'replace', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
"metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
"cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
:paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
:keyword replace_original_tokens: A value indicating whether encoded tokens should replace
original tokens. If false, encoded tokens are added as synonyms. Default is true.
:paramtype replace_original_tokens: bool
"""
super(PhoneticTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' # type: str
self.encoder = kwargs.get('encoder', None)
self.replace_original_tokens = kwargs.get('replace_original_tokens', True)
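# Illustrative sketch: a phonetic filter that adds double-metaphone encodings as synonyms
# rather than replacing the original tokens. The encoder value is one of the PhoneticEncoder
# choices documented above; the name is an example.
def _example_phonetic_token_filter():
    return PhoneticTokenFilter(
        name="phonetic-synonyms",
        encoder="doubleMetaphone",
        replace_original_tokens=False,
    )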
class PIIDetectionSkill(SearchIndexerSkill):
"""Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
:vartype default_language_code: str
:ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:vartype minimum_precision: float
:ivar masking_mode: A parameter that provides various ways to mask the personal information
detected in the input text. Default is 'none'. Possible values include: "none", "replace".
:vartype masking_mode: str or
~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode
:ivar masking_character: The character used to mask the text if the maskingMode parameter is
set to replace. Default is '*'.
:vartype masking_character: str
:ivar model_version: The version of the model to use when calling the Text Analytics service.
It will default to the latest available when not specified. We recommend you do not specify
this value unless absolutely necessary.
:vartype model_version: str
:ivar pii_categories: A list of PII entity categories that should be extracted and masked.
:vartype pii_categories: list[str]
:ivar domain: If specified, will set the PII domain to include only a subset of the entity
categories. Possible values include: 'phi', 'none'. Default is 'none'.
:vartype domain: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
'minimum_precision': {'maximum': 1, 'minimum': 0},
'masking_character': {'max_length': 1, 'min_length': 0},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'minimum_precision': {'key': 'minimumPrecision', 'type': 'float'},
'masking_mode': {'key': 'maskingMode', 'type': 'str'},
'masking_character': {'key': 'maskingCharacter', 'type': 'str'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
'pii_categories': {'key': 'piiCategories', 'type': '[str]'},
'domain': {'key': 'domain', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
:paramtype default_language_code: str
:keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
:paramtype minimum_precision: float
:keyword masking_mode: A parameter that provides various ways to mask the personal information
detected in the input text. Default is 'none'. Possible values include: "none", "replace".
:paramtype masking_mode: str or
~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode
:keyword masking_character: The character used to mask the text if the maskingMode parameter is
set to replace. Default is '*'.
:paramtype masking_character: str
:keyword model_version: The version of the model to use when calling the Text Analytics
service. It will default to the latest available when not specified. We recommend you do not
specify this value unless absolutely necessary.
:paramtype model_version: str
:keyword pii_categories: A list of PII entity categories that should be extracted and masked.
:paramtype pii_categories: list[str]
:keyword domain: If specified, will set the PII domain to include only a subset of the entity
categories. Possible values include: 'phi', 'none'. Default is 'none'.
:paramtype domain: str
"""
super(PIIDetectionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.PIIDetectionSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.minimum_precision = kwargs.get('minimum_precision', None)
self.masking_mode = kwargs.get('masking_mode', None)
self.masking_character = kwargs.get('masking_character', None)
self.model_version = kwargs.get('model_version', None)
self.pii_categories = kwargs.get('pii_categories', None)
self.domain = kwargs.get('domain', None)
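# Illustrative sketch: a PII detection skill that masks detected entities with '*' and only
# includes entities whose confidence exceeds 0.5, wiring the skill's masked-text output to a
# target field. Paths, names, and the threshold are example values.
def _example_pii_detection_skill():
    return PIIDetectionSkill(
        context="/document",
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="maskedText", target_name="masked_content")],
        default_language_code="en",
        minimum_precision=0.5,
        masking_mode="replace",
        masking_character="*",
    )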
class PrioritizedFields(msrest.serialization.Model):
"""Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers.
:ivar title_field: Defines the title field to be used for semantic ranking, captions,
highlights, and answers. If you don't have a title field in your index, leave this blank.
:vartype title_field: ~azure.search.documents.indexes.models.SemanticField
:ivar prioritized_content_fields: Defines the content fields to be used for semantic ranking,
captions, highlights, and answers. For the best result, the selected fields should contain text
in natural language form. The order of the fields in the array represents their priority.
Fields with lower priority may get truncated if the content is long.
:vartype prioritized_content_fields: list[~azure.search.documents.indexes.models.SemanticField]
:ivar prioritized_keywords_fields: Defines the keyword fields to be used for semantic ranking,
captions, highlights, and answers. For the best result, the selected fields should contain a
list of keywords. The order of the fields in the array represents their priority. Fields with
lower priority may get truncated if the content is long.
:vartype prioritized_keywords_fields:
list[~azure.search.documents.indexes.models.SemanticField]
"""
_attribute_map = {
'title_field': {'key': 'titleField', 'type': 'SemanticField'},
'prioritized_content_fields': {'key': 'prioritizedContentFields', 'type': '[SemanticField]'},
'prioritized_keywords_fields': {'key': 'prioritizedKeywordsFields', 'type': '[SemanticField]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword title_field: Defines the title field to be used for semantic ranking, captions,
highlights, and answers. If you don't have a title field in your index, leave this blank.
:paramtype title_field: ~azure.search.documents.indexes.models.SemanticField
:keyword prioritized_content_fields: Defines the content fields to be used for semantic
ranking, captions, highlights, and answers. For the best result, the selected fields should
contain text in natural language form. The order of the fields in the array represents their
priority. Fields with lower priority may get truncated if the content is long.
:paramtype prioritized_content_fields:
list[~azure.search.documents.indexes.models.SemanticField]
:keyword prioritized_keywords_fields: Defines the keyword fields to be used for semantic
ranking, captions, highlights, and answers. For the best result, the selected fields should
contain a list of keywords. The order of the fields in the array represents their priority.
Fields with lower priority may get truncated if the content is long.
:paramtype prioritized_keywords_fields:
list[~azure.search.documents.indexes.models.SemanticField]
"""
super(PrioritizedFields, self).__init__(**kwargs)
self.title_field = kwargs.get('title_field', None)
self.prioritized_content_fields = kwargs.get('prioritized_content_fields', None)
self.prioritized_keywords_fields = kwargs.get('prioritized_keywords_fields', None)
class RequestOptions(msrest.serialization.Model):
"""Parameter group.
:ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
:vartype x_ms_client_request_id: str
"""
_attribute_map = {
'x_ms_client_request_id': {'key': 'x-ms-client-request-id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
:paramtype x_ms_client_request_id: str
"""
super(RequestOptions, self).__init__(**kwargs)
self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None)
class ResourceCounter(msrest.serialization.Model):
"""Represents a resource's usage and quota.
All required parameters must be populated in order to send to Azure.
:ivar usage: Required. The resource usage amount.
:vartype usage: long
:ivar quota: The resource amount quota.
:vartype quota: long
"""
_validation = {
'usage': {'required': True},
}
_attribute_map = {
'usage': {'key': 'usage', 'type': 'long'},
'quota': {'key': 'quota', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
:keyword usage: Required. The resource usage amount.
:paramtype usage: long
:keyword quota: The resource amount quota.
:paramtype quota: long
"""
super(ResourceCounter, self).__init__(**kwargs)
self.usage = kwargs['usage']
self.quota = kwargs.get('quota', None)
class ScoringProfile(msrest.serialization.Model):
"""Defines parameters for a search index that influence scoring in search queries.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the scoring profile.
:vartype name: str
:ivar text_weights: Parameters that boost scoring based on text matches in certain index
fields.
:vartype text_weights: ~azure.search.documents.indexes.models.TextWeights
:ivar functions: The collection of functions that influence the scoring of documents.
:vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
:ivar function_aggregation: A value indicating how the results of individual scoring functions
should be combined. Defaults to "sum". Ignored if there are no scoring functions. Possible
values include: "sum", "average", "minimum", "maximum", "firstMatching".
:vartype function_aggregation: str or
~azure.search.documents.indexes.models.ScoringFunctionAggregation
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'text_weights': {'key': 'text', 'type': 'TextWeights'},
'functions': {'key': 'functions', 'type': '[ScoringFunction]'},
'function_aggregation': {'key': 'functionAggregation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the scoring profile.
:paramtype name: str
:keyword text_weights: Parameters that boost scoring based on text matches in certain index
fields.
:paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights
:keyword functions: The collection of functions that influence the scoring of documents.
:paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
:keyword function_aggregation: A value indicating how the results of individual scoring
functions should be combined. Defaults to "sum". Ignored if there are no scoring functions.
Possible values include: "sum", "average", "minimum", "maximum", "firstMatching".
:paramtype function_aggregation: str or
~azure.search.documents.indexes.models.ScoringFunctionAggregation
"""
super(ScoringProfile, self).__init__(**kwargs)
self.name = kwargs['name']
self.text_weights = kwargs.get('text_weights', None)
self.functions = kwargs.get('functions', None)
self.function_aggregation = kwargs.get('function_aggregation', None)
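# Illustrative sketch: a scoring profile that doubles the weight of matches in a "title"
# field and sums function scores. This assumes the TextWeights model defined elsewhere in
# this module accepts a 'weights' mapping of field name to boost; the profile and field
# names are examples.
def _example_scoring_profile():
    return ScoringProfile(
        name="boost-title",
        text_weights=TextWeights(weights={"title": 2.0}),
        function_aggregation="sum",
    )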
class SearchError(msrest.serialization.Model):
"""Describes an error condition for the Azure Cognitive Search API.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar code: One of a server-defined set of error codes.
:vartype code: str
:ivar message: Required. A human-readable representation of the error.
:vartype message: str
:ivar details: An array of details about specific errors that led to this reported error.
:vartype details: list[~azure.search.documents.indexes.models.SearchError]
"""
_validation = {
'code': {'readonly': True},
'message': {'required': True, 'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[SearchError]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchError, self).__init__(**kwargs)
self.code = None
self.message = None
self.details = None
class SearchField(msrest.serialization.Model):
"""Represents a field in an index definition, which describes the name, data type, and search behavior of a field.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the field, which must be unique within the fields collection
of the index or parent field.
:vartype name: str
:ivar type: Required. The data type of the field. Possible values include: "Edm.String",
"Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
"Edm.GeographyPoint", "Edm.ComplexType".
:vartype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
:ivar key: A value indicating whether the field uniquely identifies documents in the index.
Exactly one top-level field in each index must be chosen as the key field and it must be of
type Edm.String. Key fields can be used to look up documents directly and update or delete
specific documents. Default is false for simple fields and null for complex fields.
:vartype key: bool
:ivar retrievable: A value indicating whether the field can be returned in a search result. You
can disable this option if you want to use a field (for example, margin) as a filter, sorting,
or scoring mechanism but do not want the field to be visible to the end user. This property
must be true for key fields, and it must be null for complex fields. This property can be
changed on existing fields. Enabling this property does not cause any increase in index storage
requirements. Default is true for simple fields and null for complex fields.
:vartype retrievable: bool
:ivar searchable: A value indicating whether the field is full-text searchable. This means it
will undergo analysis such as word-breaking during indexing. If you set a searchable field to a
value like "sunny day", internally it will be split into the individual tokens "sunny" and
"day". This enables full-text searches for these terms. Fields of type Edm.String or
Collection(Edm.String) are searchable by default. This property must be false for simple fields
of other non-string data types, and it must be null for complex fields. Note: searchable fields
consume extra space in your index since Azure Cognitive Search will store an additional
tokenized version of the field value for full-text searches. If you want to save space in your
index and you don't need a field to be included in searches, set searchable to false.
:vartype searchable: bool
:ivar filterable: A value indicating whether to enable the field to be referenced in $filter
queries. filterable differs from searchable in how strings are handled. Fields of type
Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
$filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
must be null for complex fields. Default is true for simple fields and null for complex fields.
:vartype filterable: bool
:ivar sortable: A value indicating whether to enable the field to be referenced in $orderby
expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
users will want to sort by fields in the documents. A simple field can be sortable only if it
is single-valued (it has a single value in the scope of the parent document). Simple collection
fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex
collections are also multi-valued, and therefore cannot be sortable. This is true whether it's
an immediate parent field, or an ancestor field, that's the complex collection. Complex fields
cannot be sortable and the sortable property must be null for such fields. The default for
sortable is true for single-valued simple fields, false for multi-valued simple fields, and
null for complex fields.
:vartype sortable: bool
:ivar facetable: A value indicating whether to enable the field to be referenced in facet
queries. Typically used in a presentation of search results that includes hit count by category
(for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
fields.
:vartype facetable: bool
:ivar analyzer: The name of the analyzer to use for the field. This option can be used only
with searchable fields and it can't be set together with either searchAnalyzer or
indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
"bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene",
"zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
"cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
"en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
"fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
"gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
"is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
"ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
"lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
"no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene",
"pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
"ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
"es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
:vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:ivar search_analyzer: The name of the analyzer used at search time for the field. This option
can be used only with searchable fields. It must be set together with indexAnalyzer and it
cannot be set together with the analyzer option. This property cannot be set to the name of a
language analyzer; use the analyzer property instead if you need a language analyzer. This
analyzer can be updated on an existing field. Must be null for complex fields. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
"zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
"da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
"et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
"de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
"hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
"id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
"kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
"ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
"pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
"sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:vartype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option
can be used only with searchable fields. It must be set together with searchAnalyzer and it
cannot be set together with the analyzer option. This property cannot be set to the name of a
language analyzer; use the analyzer property instead if you need a language analyzer. Once the
analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
"bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft",
"zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
"cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft",
"en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene",
"gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft",
"he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft",
"id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft",
"ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene",
"lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene",
"fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
"sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:vartype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:ivar normalizer: The name of the normalizer to use for the field. This option can be used only
with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it
cannot be changed for the field. Must be null for complex fields. Possible values include:
"asciifolding", "elision", "lowercase", "standard", "uppercase".
:vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
:ivar synonym_maps: A list of the names of synonym maps to associate with this field. This
option can be used only with searchable fields. Currently only one synonym map per field is
supported. Assigning a synonym map to a field ensures that query terms targeting that field are
expanded at query-time using the rules in the synonym map. This attribute can be changed on
existing fields. Must be null or an empty collection for complex fields.
:vartype synonym_maps: list[str]
:ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or
Collection(Edm.ComplexType). Must be null or empty for simple fields.
:vartype fields: list[~azure.search.documents.indexes.models.SearchField]
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key': {'key': 'key', 'type': 'bool'},
'retrievable': {'key': 'retrievable', 'type': 'bool'},
'searchable': {'key': 'searchable', 'type': 'bool'},
'filterable': {'key': 'filterable', 'type': 'bool'},
'sortable': {'key': 'sortable', 'type': 'bool'},
'facetable': {'key': 'facetable', 'type': 'bool'},
'analyzer': {'key': 'analyzer', 'type': 'str'},
'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'},
'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'},
'normalizer': {'key': 'normalizer', 'type': 'str'},
'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'},
'fields': {'key': 'fields', 'type': '[SearchField]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the field, which must be unique within the fields
collection of the index or parent field.
:paramtype name: str
:keyword type: Required. The data type of the field. Possible values include: "Edm.String",
"Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
"Edm.GeographyPoint", "Edm.ComplexType".
:paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
:keyword key: A value indicating whether the field uniquely identifies documents in the index.
Exactly one top-level field in each index must be chosen as the key field and it must be of
type Edm.String. Key fields can be used to look up documents directly and update or delete
specific documents. Default is false for simple fields and null for complex fields.
:paramtype key: bool
:keyword retrievable: A value indicating whether the field can be returned in a search result.
You can disable this option if you want to use a field (for example, margin) as a filter,
sorting, or scoring mechanism but do not want the field to be visible to the end user. This
property must be true for key fields, and it must be null for complex fields. This property can
be changed on existing fields. Enabling this property does not cause any increase in index
storage requirements. Default is true for simple fields and null for complex fields.
:paramtype retrievable: bool
:keyword searchable: A value indicating whether the field is full-text searchable. This means
it will undergo analysis such as word-breaking during indexing. If you set a searchable field
to a value like "sunny day", internally it will be split into the individual tokens "sunny" and
"day". This enables full-text searches for these terms. Fields of type Edm.String or
Collection(Edm.String) are searchable by default. This property must be false for simple fields
of other non-string data types, and it must be null for complex fields. Note: searchable fields
consume extra space in your index since Azure Cognitive Search will store an additional
tokenized version of the field value for full-text searches. If you want to save space in your
index and you don't need a field to be included in searches, set searchable to false.
:paramtype searchable: bool
:keyword filterable: A value indicating whether to enable the field to be referenced in $filter
queries. filterable differs from searchable in how strings are handled. Fields of type
Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
$filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
must be null for complex fields. Default is true for simple fields and null for complex fields.
:paramtype filterable: bool
:keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
users will want to sort by fields in the documents. A simple field can be sortable only if it
is single-valued (it has a single value in the scope of the parent document). Simple collection
fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex
collections are also multi-valued, and therefore cannot be sortable. This is true whether the
complex collection is an immediate parent field or an ancestor field. Complex fields
cannot be sortable and the sortable property must be null for such fields. The default for
sortable is true for single-valued simple fields, false for multi-valued simple fields, and
null for complex fields.
:paramtype sortable: bool
:keyword facetable: A value indicating whether to enable the field to be referenced in facet
queries. Typically used in a presentation of search results that includes hit count by category
(for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
fields.
:paramtype facetable: bool
:keyword analyzer: The name of the analyzer to use for the field. This option can be used only
with searchable fields and it can't be set together with either searchAnalyzer or
indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
"bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene",
"zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
"cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
"en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
"fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
"gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
"is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
"ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
"lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
"no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene",
"pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
"ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
"es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
:paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword search_analyzer: The name of the analyzer used at search time for the field. This
option can be used only with searchable fields. It must be set together with indexAnalyzer and
it cannot be set together with the analyzer option. This property cannot be set to the name of
a language analyzer; use the analyzer property instead if you need a language analyzer. This
analyzer can be updated on an existing field. Must be null for complex fields. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
"zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
"da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
"et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
"de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
"hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
"id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
"kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
"ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
"pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
"sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword index_analyzer: The name of the analyzer used at indexing time for the field. This
option can be used only with searchable fields. It must be set together with searchAnalyzer and
it cannot be set together with the analyzer option. This property cannot be set to the name of
a language analyzer; use the analyzer property instead if you need a language analyzer. Once
the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
"bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft",
"zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
"cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft",
"en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene",
"gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft",
"he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft",
"id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft",
"ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene",
"lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene",
"fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
"sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword normalizer: The name of the normalizer to use for the field. This option can be used
only with fields with filterable, sortable, or facetable enabled. Once the normalizer is
chosen, it cannot be changed for the field. Must be null for complex fields. Possible values
include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
:paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
:keyword synonym_maps: A list of the names of synonym maps to associate with this field. This
option can be used only with searchable fields. Currently only one synonym map per field is
supported. Assigning a synonym map to a field ensures that query terms targeting that field are
expanded at query-time using the rules in the synonym map. This attribute can be changed on
existing fields. Must be null or an empty collection for complex fields.
:paramtype synonym_maps: list[str]
:keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or
Collection(Edm.ComplexType). Must be null or empty for simple fields.
:paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
"""
super(SearchField, self).__init__(**kwargs)
self.name = kwargs['name']
self.type = kwargs['type']
self.key = kwargs.get('key', None)
self.retrievable = kwargs.get('retrievable', None)
self.searchable = kwargs.get('searchable', None)
self.filterable = kwargs.get('filterable', None)
self.sortable = kwargs.get('sortable', None)
self.facetable = kwargs.get('facetable', None)
self.analyzer = kwargs.get('analyzer', None)
self.search_analyzer = kwargs.get('search_analyzer', None)
self.index_analyzer = kwargs.get('index_analyzer', None)
self.normalizer = kwargs.get('normalizer', None)
self.synonym_maps = kwargs.get('synonym_maps', None)
self.fields = kwargs.get('fields', None)
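# Illustrative sketch (not part of the generated models): constructing a key field and a
# searchable string field. The field names and analyzer choice below are hypothetical
# examples, not values required by the service.
def _example_search_fields():
    hotel_id = SearchField(
        name="hotelId",        # exactly one top-level field must be the key
        type="Edm.String",     # key fields must be of type Edm.String
        key=True,
        filterable=True,
    )
    description = SearchField(
        name="description",
        type="Edm.String",
        searchable=True,       # tokenized for full-text search
        analyzer="en.lucene",  # cannot be changed once the field exists
    )
    return [hotel_id, description]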
class SearchIndex(msrest.serialization.Model):
"""Represents a search index definition, which describes the fields and search behavior of an index.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the index.
:vartype name: str
:ivar fields: Required. The fields of the index.
:vartype fields: list[~azure.search.documents.indexes.models.SearchField]
:ivar scoring_profiles: The scoring profiles for the index.
:vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
:ivar default_scoring_profile: The name of the scoring profile to use if none is specified in
the query. If this property is not set and no scoring profile is specified in the query, then
default scoring (tf-idf) will be used.
:vartype default_scoring_profile: str
:ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
:vartype cors_options: ~azure.search.documents.indexes.models.CorsOptions
:ivar suggesters: The suggesters for the index.
:vartype suggesters: list[~azure.search.documents.indexes.models.Suggester]
:ivar analyzers: The analyzers for the index.
:vartype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
:ivar tokenizers: The tokenizers for the index.
:vartype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
:ivar token_filters: The token filters for the index.
:vartype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
:ivar char_filters: The character filters for the index.
:vartype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
:ivar normalizers: The normalizers for the index.
:vartype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
:ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
needed if you want to rotate your encryption key; your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:ivar similarity: The type of similarity algorithm to be used when scoring and ranking the
documents matching a search query. The similarity algorithm can only be defined at index
creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
algorithm is used.
:vartype similarity: ~azure.search.documents.indexes.models.Similarity
:ivar semantic_settings: Defines parameters for a search index that influence semantic
capabilities.
:vartype semantic_settings: ~azure.search.documents.indexes.models.SemanticSettings
:ivar e_tag: The ETag of the index.
:vartype e_tag: str
"""
_validation = {
'name': {'required': True},
'fields': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'fields': {'key': 'fields', 'type': '[SearchField]'},
'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'},
'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'},
'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'},
'suggesters': {'key': 'suggesters', 'type': '[Suggester]'},
'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'},
'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'},
'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'},
'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'},
'normalizers': {'key': 'normalizers', 'type': '[LexicalNormalizer]'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'similarity': {'key': 'similarity', 'type': 'Similarity'},
'semantic_settings': {'key': 'semantic', 'type': 'SemanticSettings'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the index.
:paramtype name: str
:keyword fields: Required. The fields of the index.
:paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
:keyword scoring_profiles: The scoring profiles for the index.
:paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
:keyword default_scoring_profile: The name of the scoring profile to use if none is specified
in the query. If this property is not set and no scoring profile is specified in the query,
then default scoring (tf-idf) will be used.
:paramtype default_scoring_profile: str
:keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
:paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions
:keyword suggesters: The suggesters for the index.
:paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester]
:keyword analyzers: The analyzers for the index.
:paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
:keyword tokenizers: The tokenizers for the index.
:paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
:keyword token_filters: The token filters for the index.
:paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
:keyword char_filters: The character filters for the index.
:paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
:keyword normalizers: The normalizers for the index.
:paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
:keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
needed if you want to rotate your encryption key; your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:keyword similarity: The type of similarity algorithm to be used when scoring and ranking the
documents matching a search query. The similarity algorithm can only be defined at index
creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
algorithm is used.
:paramtype similarity: ~azure.search.documents.indexes.models.Similarity
:keyword semantic_settings: Defines parameters for a search index that influence semantic
capabilities.
:paramtype semantic_settings: ~azure.search.documents.indexes.models.SemanticSettings
:keyword e_tag: The ETag of the index.
:paramtype e_tag: str
"""
super(SearchIndex, self).__init__(**kwargs)
self.name = kwargs['name']
self.fields = kwargs['fields']
self.scoring_profiles = kwargs.get('scoring_profiles', None)
self.default_scoring_profile = kwargs.get('default_scoring_profile', None)
self.cors_options = kwargs.get('cors_options', None)
self.suggesters = kwargs.get('suggesters', None)
self.analyzers = kwargs.get('analyzers', None)
self.tokenizers = kwargs.get('tokenizers', None)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
self.normalizers = kwargs.get('normalizers', None)
self.encryption_key = kwargs.get('encryption_key', None)
self.similarity = kwargs.get('similarity', None)
self.semantic_settings = kwargs.get('semantic_settings', None)
self.e_tag = kwargs.get('e_tag', None)
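# Illustrative sketch (not part of the generated models): a minimal index definition built
# from the field sketch above. The index name is a hypothetical example; scoring profiles,
# analyzers, and the other optional settings are left at their defaults.
def _example_search_index():
    return SearchIndex(
        name="hotels-sample-index",
        fields=_example_search_fields(),  # name and fields are the only required properties
    )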
class SearchIndexer(msrest.serialization.Model):
"""Represents an indexer.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the indexer.
:vartype name: str
:ivar description: The description of the indexer.
:vartype description: str
:ivar data_source_name: Required. The name of the datasource from which this indexer reads
data.
:vartype data_source_name: str
:ivar skillset_name: The name of the skillset executing with this indexer.
:vartype skillset_name: str
:ivar target_index_name: Required. The name of the index to which this indexer writes data.
:vartype target_index_name: str
:ivar schedule: The schedule for this indexer.
:vartype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
:ivar parameters: Parameters for indexer execution.
:vartype parameters: ~azure.search.documents.indexes.models.IndexingParameters
:ivar field_mappings: Defines mappings between fields in the data source and corresponding
target fields in the index.
:vartype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
:ivar output_field_mappings: Output field mappings are applied after enrichment and immediately
before indexing.
:vartype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
:ivar is_disabled: A value indicating whether the indexer is disabled. Default is false.
:vartype is_disabled: bool
:ivar e_tag: The ETag of the indexer.
:vartype e_tag: str
:ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your indexer
definition (as well as indexer execution status) when you want full assurance that no one, not
even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your
indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore
attempts to set this property to null. You can change this property as needed if you want to
rotate your encryption key; your indexer definition (and indexer execution status) will be
unaffected. Encryption with customer-managed keys is not available for free search services,
and is only available for paid services created on or after January 1, 2019.
:vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps
without having to rebuild the index every time.
:vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
"""
_validation = {
'name': {'required': True},
'data_source_name': {'required': True},
'target_index_name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'data_source_name': {'key': 'dataSourceName', 'type': 'str'},
'skillset_name': {'key': 'skillsetName', 'type': 'str'},
'target_index_name': {'key': 'targetIndexName', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'},
'parameters': {'key': 'parameters', 'type': 'IndexingParameters'},
'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'},
'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'},
'is_disabled': {'key': 'disabled', 'type': 'bool'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'cache': {'key': 'cache', 'type': 'SearchIndexerCache'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the indexer.
:paramtype name: str
:keyword description: The description of the indexer.
:paramtype description: str
:keyword data_source_name: Required. The name of the datasource from which this indexer reads
data.
:paramtype data_source_name: str
:keyword skillset_name: The name of the skillset executing with this indexer.
:paramtype skillset_name: str
:keyword target_index_name: Required. The name of the index to which this indexer writes data.
:paramtype target_index_name: str
:keyword schedule: The schedule for this indexer.
:paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
:keyword parameters: Parameters for indexer execution.
:paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters
:keyword field_mappings: Defines mappings between fields in the data source and corresponding
target fields in the index.
:paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
:keyword output_field_mappings: Output field mappings are applied after enrichment and
immediately before indexing.
:paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
:keyword is_disabled: A value indicating whether the indexer is disabled. Default is false.
:paramtype is_disabled: bool
:keyword e_tag: The ETag of the indexer.
:paramtype e_tag: str
:keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your indexer
definition (as well as indexer execution status) when you want full assurance that no one, not
even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your
indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore
attempts to set this property to null. You can change this property as needed if you want to
rotate your encryption key; your indexer definition (and indexer execution status) will be
unaffected. Encryption with customer-managed keys is not available for free search services,
and is only available for paid services created on or after January 1, 2019.
:paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification
steps without having to rebuild the index every time.
:paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
"""
super(SearchIndexer, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.data_source_name = kwargs['data_source_name']
self.skillset_name = kwargs.get('skillset_name', None)
self.target_index_name = kwargs['target_index_name']
self.schedule = kwargs.get('schedule', None)
self.parameters = kwargs.get('parameters', None)
self.field_mappings = kwargs.get('field_mappings', None)
self.output_field_mappings = kwargs.get('output_field_mappings', None)
self.is_disabled = kwargs.get('is_disabled', False)
self.e_tag = kwargs.get('e_tag', None)
self.encryption_key = kwargs.get('encryption_key', None)
self.cache = kwargs.get('cache', None)
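# Illustrative sketch (not part of the generated models): a minimal indexer that reads from a
# datasource and writes into a target index. The names below are hypothetical and must refer
# to resources that already exist on the search service.
def _example_search_indexer():
    return SearchIndexer(
        name="hotels-indexer",
        data_source_name="hotels-datasource",     # required
        target_index_name="hotels-sample-index",  # required
        is_disabled=False,                        # default is False
    )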
class SearchIndexerCache(msrest.serialization.Model):
"""SearchIndexerCache.
:ivar storage_connection_string: The connection string to the storage account where the cache
data will be persisted.
:vartype storage_connection_string: str
:ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled.
:vartype enable_reprocessing: bool
"""
_attribute_map = {
'storage_connection_string': {'key': 'storageConnectionString', 'type': 'str'},
'enable_reprocessing': {'key': 'enableReprocessing', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword storage_connection_string: The connection string to the storage account where the
cache data will be persisted.
:paramtype storage_connection_string: str
:keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled.
:paramtype enable_reprocessing: bool
"""
super(SearchIndexerCache, self).__init__(**kwargs)
self.storage_connection_string = kwargs.get('storage_connection_string', None)
self.enable_reprocessing = kwargs.get('enable_reprocessing', None)
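# Illustrative sketch (not part of the generated models): enabling enrichment caching for an
# indexer. The connection string is a placeholder.
def _example_indexer_cache():
    return SearchIndexerCache(
        storage_connection_string="<storage-connection-string>",
        enable_reprocessing=True,  # allow incremental reprocessing of cached enrichments
    )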
class SearchIndexerDataContainer(msrest.serialization.Model):
"""Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the table or view (for Azure SQL data source) or collection
(for CosmosDB data source) that will be indexed.
:vartype name: str
:ivar query: A query that is applied to this data container. The syntax and meaning of this
parameter are datasource-specific. Not supported by Azure SQL datasources.
:vartype query: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the table or view (for Azure SQL data source) or
collection (for CosmosDB data source) that will be indexed.
:paramtype name: str
:keyword query: A query that is applied to this data container. The syntax and meaning of this
parameter are datasource-specific. Not supported by Azure SQL datasources.
:paramtype query: str
"""
super(SearchIndexerDataContainer, self).__init__(**kwargs)
self.name = kwargs['name']
self.query = kwargs.get('query', None)
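# Illustrative sketch (not part of the generated models): describing the table or collection
# an indexer reads from. The container name is hypothetical; the optional query is omitted
# because it is not supported for Azure SQL datasources.
def _example_data_container():
    return SearchIndexerDataContainer(name="hotels")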
class SearchIndexerDataIdentity(msrest.serialization.Model):
"""Abstract base type for data identities.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SearchIndexerDataNoneIdentity, SearchIndexerDataUserAssignedIdentity.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the identity. Constant filled by
server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity': 'SearchIndexerDataNoneIdentity', '#Microsoft.Azure.Search.SearchIndexerDataUserAssignedIdentity': 'SearchIndexerDataUserAssignedIdentity'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchIndexerDataIdentity, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class SearchIndexerDataNoneIdentity(SearchIndexerDataIdentity):
"""Clears the identity property of a datasource.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the identity. Constant filled by
server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchIndexerDataNoneIdentity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity' # type: str
class SearchIndexerDataSource(msrest.serialization.Model):
"""Represents a datasource definition, which can be used to configure an indexer.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the datasource.
:vartype name: str
:ivar description: The description of the datasource.
:vartype description: str
:ivar type: Required. The type of the datasource. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
:vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
:ivar credentials: Required. Credentials for the datasource.
:vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
:ivar container: Required. The data container for the datasource.
:vartype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
:ivar identity: An explicit managed identity to use for this datasource. If not specified and
the connection string is a managed identity, the system-assigned managed identity is used. If
not specified, the value remains unchanged. If "none" is specified, the value of this property
is cleared.
:vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
:ivar data_change_detection_policy: The data change detection policy for the datasource.
:vartype data_change_detection_policy:
~azure.search.documents.indexes.models.DataChangeDetectionPolicy
:ivar data_deletion_detection_policy: The data deletion detection policy for the datasource.
:vartype data_deletion_detection_policy:
~azure.search.documents.indexes.models.DataDeletionDetectionPolicy
:ivar e_tag: The ETag of the data source.
:vartype e_tag: str
:ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your datasource
definition when you want full assurance that no one, not even Microsoft, can decrypt your data
source definition in Azure Cognitive Search. Once you have encrypted your data source
definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
this property to null. You can change this property as needed if you want to rotate your
encryption key; your datasource definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
:vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'credentials': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'},
'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},
'identity': {'key': 'identity', 'type': 'SearchIndexerDataIdentity'},
'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},
'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the datasource.
:paramtype name: str
:keyword description: The description of the datasource.
:paramtype description: str
:keyword type: Required. The type of the datasource. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
:paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
:keyword credentials: Required. Credentials for the datasource.
:paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
:keyword container: Required. The data container for the datasource.
:paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
:keyword identity: An explicit managed identity to use for this datasource. If not specified
and the connection string is a managed identity, the system-assigned managed identity is used.
If not specified, the value remains unchanged. If "none" is specified, the value of this
property is cleared.
:paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
:keyword data_change_detection_policy: The data change detection policy for the datasource.
:paramtype data_change_detection_policy:
~azure.search.documents.indexes.models.DataChangeDetectionPolicy
:keyword data_deletion_detection_policy: The data deletion detection policy for the datasource.
:paramtype data_deletion_detection_policy:
~azure.search.documents.indexes.models.DataDeletionDetectionPolicy
:keyword e_tag: The ETag of the data source.
:paramtype e_tag: str
:keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your datasource
definition when you want full assurance that no one, not even Microsoft, can decrypt your data
source definition in Azure Cognitive Search. Once you have encrypted your data source
definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
this property to null. You can change this property as needed if you want to rotate your
encryption key; your datasource definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
:paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
super(SearchIndexerDataSource, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.type = kwargs['type']
self.credentials = kwargs['credentials']
self.container = kwargs['container']
self.identity = kwargs.get('identity', None)
self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None)
self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None)
self.e_tag = kwargs.get('e_tag', None)
self.encryption_key = kwargs.get('encryption_key', None)
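# Illustrative sketch (not part of the generated models): a datasource pointing an indexer at
# an Azure SQL table. DataSourceCredentials is assumed to be the credentials model defined
# elsewhere in this module; the connection string and resource names are placeholders.
def _example_data_source():
    return SearchIndexerDataSource(
        name="hotels-datasource",
        type="azuresql",  # one of the SearchIndexerDataSourceType values
        credentials=DataSourceCredentials(
            connection_string="<azure-sql-connection-string>"
        ),
        container=SearchIndexerDataContainer(name="hotels"),
    )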
class SearchIndexerDataUserAssignedIdentity(SearchIndexerDataIdentity):
"""Specifies the identity for a datasource to use.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the identity. Constant filled by
server.
:vartype odata_type: str
:ivar user_assigned_identity: Required. The fully qualified Azure resource Id of a user
assigned managed identity typically in the form
"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
that should have been assigned to the search service.
:vartype user_assigned_identity: str
"""
_validation = {
'odata_type': {'required': True},
'user_assigned_identity': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user
assigned managed identity typically in the form
"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
that should have been assigned to the search service.
:paramtype user_assigned_identity: str
"""
super(SearchIndexerDataUserAssignedIdentity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataUserAssignedIdentity' # type: str
self.user_assigned_identity = kwargs['user_assigned_identity']
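# Illustrative sketch (not part of the generated models): attaching an explicit user-assigned
# managed identity to a datasource. The resource Id follows the form shown in the docstring
# above and is a placeholder.
def _example_user_assigned_identity():
    return SearchIndexerDataUserAssignedIdentity(
        user_assigned_identity=(
            "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg"
            "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
        )
    )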
class SearchIndexerError(msrest.serialization.Model):
"""Represents an item- or document-level indexing error.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar key: The key of the item for which indexing failed.
:vartype key: str
:ivar error_message: Required. The message describing the error that occurred while processing
the item.
:vartype error_message: str
:ivar status_code: Required. The status code indicating why the indexing operation failed.
Possible values include: 400 for a malformed input document, 404 for document not found, 409
for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the
service is too busy.
:vartype status_code: int
:ivar name: The name of the source at which the error originated. For example, this could refer
to a particular skill in the attached skillset. This may not always be available.
:vartype name: str
:ivar details: Additional, verbose details about the error to assist in debugging the indexer.
This may not always be available.
:vartype details: str
:ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This
may not always be available.
:vartype documentation_link: str
"""
_validation = {
'key': {'readonly': True},
'error_message': {'required': True, 'readonly': True},
'status_code': {'required': True, 'readonly': True},
'name': {'readonly': True},
'details': {'readonly': True},
'documentation_link': {'readonly': True},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'status_code': {'key': 'statusCode', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
'documentation_link': {'key': 'documentationLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchIndexerError, self).__init__(**kwargs)
self.key = None
self.error_message = None
self.status_code = None
self.name = None
self.details = None
self.documentation_link = None
class SearchIndexerKnowledgeStore(msrest.serialization.Model):
"""Definition of additional projections to azure blob, table, or files, of enriched data.
All required parameters must be populated in order to send to Azure.
:ivar storage_connection_string: Required. The connection string to the storage account
projections will be stored in.
:vartype storage_connection_string: str
:ivar projections: Required. A list of additional projections to perform during indexing.
:vartype projections:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection]
"""
_validation = {
'storage_connection_string': {'required': True},
'projections': {'required': True},
}
_attribute_map = {
'storage_connection_string': {'key': 'storageConnectionString', 'type': 'str'},
'projections': {'key': 'projections', 'type': '[SearchIndexerKnowledgeStoreProjection]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword storage_connection_string: Required. The connection string to the storage account
projections will be stored in.
:paramtype storage_connection_string: str
:keyword projections: Required. A list of additional projections to perform during indexing.
:paramtype projections:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection]
"""
super(SearchIndexerKnowledgeStore, self).__init__(**kwargs)
self.storage_connection_string = kwargs['storage_connection_string']
self.projections = kwargs['projections']
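# Illustrative sketch (not part of the generated models): a knowledge store that persists
# projections to a storage account. The projection list is built with the selector models
# defined further down in this module; the connection string is a placeholder.
def _example_knowledge_store(projections):
    return SearchIndexerKnowledgeStore(
        storage_connection_string="<storage-connection-string>",
        projections=projections,  # list of SearchIndexerKnowledgeStoreProjection
    )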
class SearchIndexerKnowledgeStoreProjectionSelector(msrest.serialization.Model):
"""Abstract class to share properties between concrete selectors.
:ivar reference_key_name: Name of reference key to different projection.
:vartype reference_key_name: str
:ivar generated_key_name: Name of generated key to store projection under.
:vartype generated_key_name: str
:ivar source: Source data to project.
:vartype source: str
:ivar source_context: Source context for complex projections.
:vartype source_context: str
:ivar inputs: Nested inputs for complex projections.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
_attribute_map = {
'reference_key_name': {'key': 'referenceKeyName', 'type': 'str'},
'generated_key_name': {'key': 'generatedKeyName', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'source_context': {'key': 'sourceContext', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword reference_key_name: Name of reference key to different projection.
:paramtype reference_key_name: str
:keyword generated_key_name: Name of generated key to store projection under.
:paramtype generated_key_name: str
:keyword source: Source data to project.
:paramtype source: str
:keyword source_context: Source context for complex projections.
:paramtype source_context: str
:keyword inputs: Nested inputs for complex projections.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
super(SearchIndexerKnowledgeStoreProjectionSelector, self).__init__(**kwargs)
self.reference_key_name = kwargs.get('reference_key_name', None)
self.generated_key_name = kwargs.get('generated_key_name', None)
self.source = kwargs.get('source', None)
self.source_context = kwargs.get('source_context', None)
self.inputs = kwargs.get('inputs', None)
class SearchIndexerKnowledgeStoreBlobProjectionSelector(SearchIndexerKnowledgeStoreProjectionSelector):
"""Abstract class to share properties between concrete selectors.
All required parameters must be populated in order to send to Azure.
:ivar reference_key_name: Name of reference key to different projection.
:vartype reference_key_name: str
:ivar generated_key_name: Name of generated key to store projection under.
:vartype generated_key_name: str
:ivar source: Source data to project.
:vartype source: str
:ivar source_context: Source context for complex projections.
:vartype source_context: str
:ivar inputs: Nested inputs for complex projections.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar storage_container: Required. Blob container to store projections in.
:vartype storage_container: str
"""
_validation = {
'storage_container': {'required': True},
}
_attribute_map = {
'reference_key_name': {'key': 'referenceKeyName', 'type': 'str'},
'generated_key_name': {'key': 'generatedKeyName', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'source_context': {'key': 'sourceContext', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'storage_container': {'key': 'storageContainer', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword reference_key_name: Name of reference key to different projection.
:paramtype reference_key_name: str
:keyword generated_key_name: Name of generated key to store projection under.
:paramtype generated_key_name: str
:keyword source: Source data to project.
:paramtype source: str
:keyword source_context: Source context for complex projections.
:paramtype source_context: str
:keyword inputs: Nested inputs for complex projections.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword storage_container: Required. Blob container to store projections in.
:paramtype storage_container: str
"""
super(SearchIndexerKnowledgeStoreBlobProjectionSelector, self).__init__(**kwargs)
self.storage_container = kwargs['storage_container']
class SearchIndexerKnowledgeStoreFileProjectionSelector(SearchIndexerKnowledgeStoreBlobProjectionSelector):
"""Projection definition for what data to store in Azure Files.
All required parameters must be populated in order to send to Azure.
:ivar reference_key_name: Name of reference key to different projection.
:vartype reference_key_name: str
:ivar generated_key_name: Name of generated key to store projection under.
:vartype generated_key_name: str
:ivar source: Source data to project.
:vartype source: str
:ivar source_context: Source context for complex projections.
:vartype source_context: str
:ivar inputs: Nested inputs for complex projections.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar storage_container: Required. Blob container to store projections in.
:vartype storage_container: str
"""
_validation = {
'storage_container': {'required': True},
}
_attribute_map = {
'reference_key_name': {'key': 'referenceKeyName', 'type': 'str'},
'generated_key_name': {'key': 'generatedKeyName', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'source_context': {'key': 'sourceContext', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'storage_container': {'key': 'storageContainer', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword reference_key_name: Name of reference key to different projection.
:paramtype reference_key_name: str
:keyword generated_key_name: Name of generated key to store projection under.
:paramtype generated_key_name: str
:keyword source: Source data to project.
:paramtype source: str
:keyword source_context: Source context for complex projections.
:paramtype source_context: str
:keyword inputs: Nested inputs for complex projections.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword storage_container: Required. Blob container to store projections in.
:paramtype storage_container: str
"""
super(SearchIndexerKnowledgeStoreFileProjectionSelector, self).__init__(**kwargs)
class SearchIndexerKnowledgeStoreObjectProjectionSelector(SearchIndexerKnowledgeStoreBlobProjectionSelector):
"""Projection definition for what data to store in Azure Blob.
All required parameters must be populated in order to send to Azure.
:ivar reference_key_name: Name of reference key to different projection.
:vartype reference_key_name: str
:ivar generated_key_name: Name of generated key to store projection under.
:vartype generated_key_name: str
:ivar source: Source data to project.
:vartype source: str
:ivar source_context: Source context for complex projections.
:vartype source_context: str
:ivar inputs: Nested inputs for complex projections.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar storage_container: Required. Blob container to store projections in.
:vartype storage_container: str
"""
_validation = {
'storage_container': {'required': True},
}
_attribute_map = {
'reference_key_name': {'key': 'referenceKeyName', 'type': 'str'},
'generated_key_name': {'key': 'generatedKeyName', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'source_context': {'key': 'sourceContext', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'storage_container': {'key': 'storageContainer', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword reference_key_name: Name of reference key to different projection.
:paramtype reference_key_name: str
:keyword generated_key_name: Name of generated key to store projection under.
:paramtype generated_key_name: str
:keyword source: Source data to project.
:paramtype source: str
:keyword source_context: Source context for complex projections.
:paramtype source_context: str
:keyword inputs: Nested inputs for complex projections.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword storage_container: Required. Blob container to store projections in.
:paramtype storage_container: str
"""
super(SearchIndexerKnowledgeStoreObjectProjectionSelector, self).__init__(**kwargs)
class SearchIndexerKnowledgeStoreProjection(msrest.serialization.Model):
"""Container object for various projection selectors.
:ivar tables: Projections to Azure Table storage.
:vartype tables:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector]
:ivar objects: Projections to Azure Blob storage.
:vartype objects:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector]
:ivar files: Projections to Azure File storage.
:vartype files:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector]
"""
_attribute_map = {
'tables': {'key': 'tables', 'type': '[SearchIndexerKnowledgeStoreTableProjectionSelector]'},
'objects': {'key': 'objects', 'type': '[SearchIndexerKnowledgeStoreObjectProjectionSelector]'},
'files': {'key': 'files', 'type': '[SearchIndexerKnowledgeStoreFileProjectionSelector]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword tables: Projections to Azure Table storage.
:paramtype tables:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector]
:keyword objects: Projections to Azure Blob storage.
:paramtype objects:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector]
:keyword files: Projections to Azure File storage.
:paramtype files:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector]
"""
super(SearchIndexerKnowledgeStoreProjection, self).__init__(**kwargs)
self.tables = kwargs.get('tables', None)
self.objects = kwargs.get('objects', None)
self.files = kwargs.get('files', None)
class SearchIndexerKnowledgeStoreTableProjectionSelector(SearchIndexerKnowledgeStoreProjectionSelector):
"""Description for what data to store in Azure Tables.
All required parameters must be populated in order to send to Azure.
:ivar reference_key_name: Name of reference key to different projection.
:vartype reference_key_name: str
:ivar generated_key_name: Name of generated key to store projection under.
:vartype generated_key_name: str
:ivar source: Source data to project.
:vartype source: str
:ivar source_context: Source context for complex projections.
:vartype source_context: str
:ivar inputs: Nested inputs for complex projections.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar table_name: Required. Name of the Azure table to store projected data in.
:vartype table_name: str
"""
_validation = {
'table_name': {'required': True},
}
_attribute_map = {
'reference_key_name': {'key': 'referenceKeyName', 'type': 'str'},
'generated_key_name': {'key': 'generatedKeyName', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'source_context': {'key': 'sourceContext', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'table_name': {'key': 'tableName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword reference_key_name: Name of reference key to different projection.
:paramtype reference_key_name: str
:keyword generated_key_name: Name of generated key to store projection under.
:paramtype generated_key_name: str
:keyword source: Source data to project.
:paramtype source: str
:keyword source_context: Source context for complex projections.
:paramtype source_context: str
:keyword inputs: Nested inputs for complex projections.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword table_name: Required. Name of the Azure table to store projected data in.
:paramtype table_name: str
"""
super(SearchIndexerKnowledgeStoreTableProjectionSelector, self).__init__(**kwargs)
self.table_name = kwargs['table_name']
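# Usage sketch (illustrative only, not part of the generated models): wiring a
# table projection selector into a knowledge store projection. The table name,
# generated key name, and source path below are hypothetical example values.
#
#     projection = SearchIndexerKnowledgeStoreProjection(
#         tables=[
#             SearchIndexerKnowledgeStoreTableProjectionSelector(
#                 table_name="enrichedDocuments",
#                 generated_key_name="documentId",
#                 source="/document/content",
#             )
#         ],
#     )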
class SearchIndexerLimits(msrest.serialization.Model):
"""SearchIndexerLimits.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar max_run_time: The maximum duration that the indexer is permitted to run for one
execution.
:vartype max_run_time: ~datetime.timedelta
:ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be
considered valid for indexing.
:vartype max_document_extraction_size: long
:ivar max_document_content_characters_to_extract: The maximum number of characters that will be
extracted from a document picked up for indexing.
:vartype max_document_content_characters_to_extract: long
"""
_validation = {
'max_run_time': {'readonly': True},
'max_document_extraction_size': {'readonly': True},
'max_document_content_characters_to_extract': {'readonly': True},
}
_attribute_map = {
'max_run_time': {'key': 'maxRunTime', 'type': 'duration'},
'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'},
'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchIndexerLimits, self).__init__(**kwargs)
self.max_run_time = None
self.max_document_extraction_size = None
self.max_document_content_characters_to_extract = None
class SearchIndexerSkillset(msrest.serialization.Model):
"""A list of skills.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the skillset.
:vartype name: str
:ivar description: The description of the skillset.
:vartype description: str
:ivar skills: Required. A list of skills in the skillset.
:vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
:ivar cognitive_services_account: Details about cognitive services to be used when running
skills.
:vartype cognitive_services_account:
~azure.search.documents.indexes.models.CognitiveServicesAccount
:ivar knowledge_store: Definition of additional projections to azure blob, table, or files, of
enriched data.
:vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
:ivar e_tag: The ETag of the skillset.
:vartype e_tag: str
:ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your skillset
definition when you want full assurance that no one, not even Microsoft, can decrypt your
skillset definition in Azure Cognitive Search. Once you have encrypted your skillset
definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
this property to null. You can change this property as needed if you want to rotate your
encryption key; Your skillset definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
:vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
'name': {'required': True},
'skills': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'skills': {'key': 'skills', 'type': '[SearchIndexerSkill]'},
'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'},
'knowledge_store': {'key': 'knowledgeStore', 'type': 'SearchIndexerKnowledgeStore'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the skillset.
:paramtype name: str
:keyword description: The description of the skillset.
:paramtype description: str
:keyword skills: Required. A list of skills in the skillset.
:paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
:keyword cognitive_services_account: Details about cognitive services to be used when running
skills.
:paramtype cognitive_services_account:
~azure.search.documents.indexes.models.CognitiveServicesAccount
:keyword knowledge_store: Definition of additional projections to azure blob, table, or files,
of enriched data.
:paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
:keyword e_tag: The ETag of the skillset.
:paramtype e_tag: str
:keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your skillset
definition when you want full assurance that no one, not even Microsoft, can decrypt your
skillset definition in Azure Cognitive Search. Once you have encrypted your skillset
definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
this property to null. You can change this property as needed if you want to rotate your
encryption key; Your skillset definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
:paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
super(SearchIndexerSkillset, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.skills = kwargs['skills']
self.cognitive_services_account = kwargs.get('cognitive_services_account', None)
self.knowledge_store = kwargs.get('knowledge_store', None)
self.e_tag = kwargs.get('e_tag', None)
self.encryption_key = kwargs.get('encryption_key', None)
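# Usage sketch (illustrative only, not part of the generated models): a minimal
# skillset. The skill shown (SentimentSkillV3) and the field-mapping entry
# classes are defined elsewhere in this module; the names and source paths are
# hypothetical example values.
#
#     skillset = SearchIndexerSkillset(
#         name="my-skillset",
#         description="Sentiment enrichment",
#         skills=[
#             SentimentSkillV3(
#                 inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
#                 outputs=[OutputFieldMappingEntry(name="sentiment", target_name="sentiment")],
#             )
#         ],
#     )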
class SearchIndexerStatus(msrest.serialization.Model):
"""Represents the current status and execution history of an indexer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar status: Required. Overall indexer status. Possible values include: "unknown", "error",
"running".
:vartype status: str or ~azure.search.documents.indexes.models.IndexerStatus
:ivar last_result: The result of the most recent or an in-progress indexer execution.
:vartype last_result: ~azure.search.documents.indexes.models.IndexerExecutionResult
:ivar execution_history: Required. History of the recent indexer executions, sorted in reverse
chronological order.
:vartype execution_history: list[~azure.search.documents.indexes.models.IndexerExecutionResult]
:ivar limits: Required. The execution limits for the indexer.
:vartype limits: ~azure.search.documents.indexes.models.SearchIndexerLimits
"""
_validation = {
'status': {'required': True, 'readonly': True},
'last_result': {'readonly': True},
'execution_history': {'required': True, 'readonly': True},
'limits': {'required': True, 'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'},
'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'},
'limits': {'key': 'limits', 'type': 'SearchIndexerLimits'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchIndexerStatus, self).__init__(**kwargs)
self.status = None
self.last_result = None
self.execution_history = None
self.limits = None
class SearchIndexerWarning(msrest.serialization.Model):
"""Represents an item-level warning.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar key: The key of the item which generated a warning.
:vartype key: str
:ivar message: Required. The message describing the warning that occurred while processing the
item.
:vartype message: str
:ivar name: The name of the source at which the warning originated. For example, this could
refer to a particular skill in the attached skillset. This may not always be available.
:vartype name: str
:ivar details: Additional, verbose details about the warning to assist in debugging the
indexer. This may not always be available.
:vartype details: str
:ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This
may not always be available.
:vartype documentation_link: str
"""
_validation = {
'key': {'readonly': True},
'message': {'required': True, 'readonly': True},
'name': {'readonly': True},
'details': {'readonly': True},
'documentation_link': {'readonly': True},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
'documentation_link': {'key': 'documentationLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SearchIndexerWarning, self).__init__(**kwargs)
self.key = None
self.message = None
self.name = None
self.details = None
self.documentation_link = None
class SearchResourceEncryptionKey(msrest.serialization.Model):
"""A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.
All required parameters must be populated in order to send to Azure.
:ivar key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
at rest.
:vartype key_name: str
:ivar key_version: Required. The version of your Azure Key Vault key to be used to encrypt your
data at rest.
:vartype key_version: str
:ivar vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
contains the key to be used to encrypt your data at rest. An example URI might be
https://my-keyvault-name.vault.azure.net.
:vartype vault_uri: str
:ivar access_credentials: Optional Azure Active Directory credentials used for accessing your
Azure Key Vault. Not required if using managed identity instead.
:vartype access_credentials:
~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials
:ivar identity: An explicit managed identity to use for this encryption key. If not specified
and the access credentials property is null, the system-assigned managed identity is used. On
update to the resource, if the explicit identity is unspecified, it remains unchanged. If
"none" is specified, the value of this property is cleared.
:vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
"""
_validation = {
'key_name': {'required': True},
'key_version': {'required': True},
'vault_uri': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyVaultKeyName', 'type': 'str'},
'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'},
'vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'},
'identity': {'key': 'identity', 'type': 'SearchIndexerDataIdentity'},
}
def __init__(
self,
**kwargs
):
"""
:keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your
data at rest.
:paramtype key_name: str
:keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt
your data at rest.
:paramtype key_version: str
:keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name,
that contains the key to be used to encrypt your data at rest. An example URI might be
https://my-keyvault-name.vault.azure.net.
:paramtype vault_uri: str
:keyword access_credentials: Optional Azure Active Directory credentials used for accessing
your Azure Key Vault. Not required if using managed identity instead.
:paramtype access_credentials:
~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials
:keyword identity: An explicit managed identity to use for this encryption key. If not
specified and the access credentials property is null, the system-assigned managed identity is
used. On update to the resource, if the explicit identity is unspecified, it remains unchanged.
If "none" is specified, the value of this property is cleared.
:paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
"""
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.key_version = kwargs['key_version']
self.vault_uri = kwargs['vault_uri']
self.access_credentials = kwargs.get('access_credentials', None)
self.identity = kwargs.get('identity', None)
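# Usage sketch (illustrative only, not part of the generated models): a
# customer-managed key reference. The key name, version, and vault URI are
# placeholder values; access_credentials is omitted, which assumes a managed
# identity is used to reach the vault.
#
#     encryption_key = SearchResourceEncryptionKey(
#         key_name="my-key",
#         key_version="1234567890abcdef1234567890abcdef",
#         vault_uri="https://my-keyvault-name.vault.azure.net",
#     )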
class SemanticConfiguration(msrest.serialization.Model):
"""Defines a specific configuration to be used in the context of semantic capabilities.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the semantic configuration.
:vartype name: str
:ivar prioritized_fields: Required. Describes the title, content, and keyword fields to be used
for semantic ranking, captions, highlights, and answers. At least one of the three sub-properties
(titleField, prioritizedKeywordsFields and prioritizedContentFields) needs to be set.
:vartype prioritized_fields: ~azure.search.documents.indexes.models.PrioritizedFields
"""
_validation = {
'name': {'required': True},
'prioritized_fields': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'prioritized_fields': {'key': 'prioritizedFields', 'type': 'PrioritizedFields'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the semantic configuration.
:paramtype name: str
:keyword prioritized_fields: Required. Describes the title, content, and keyword fields to be
used for semantic ranking, captions, highlights, and answers. At least one of the three
sub-properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) needs to be set.
:paramtype prioritized_fields: ~azure.search.documents.indexes.models.PrioritizedFields
"""
super(SemanticConfiguration, self).__init__(**kwargs)
self.name = kwargs['name']
self.prioritized_fields = kwargs['prioritized_fields']
class SemanticField(msrest.serialization.Model):
"""A field that is used as part of the semantic configuration.
:ivar field_name:
:vartype field_name: str
"""
_attribute_map = {
'field_name': {'key': 'fieldName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword field_name:
:paramtype field_name: str
"""
super(SemanticField, self).__init__(**kwargs)
self.field_name = kwargs.get('field_name', None)
class SemanticSettings(msrest.serialization.Model):
"""Defines parameters for a search index that influence semantic capabilities.
:ivar configurations: The semantic configurations for the index.
:vartype configurations: list[~azure.search.documents.indexes.models.SemanticConfiguration]
"""
_attribute_map = {
'configurations': {'key': 'configurations', 'type': '[SemanticConfiguration]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword configurations: The semantic configurations for the index.
:paramtype configurations: list[~azure.search.documents.indexes.models.SemanticConfiguration]
"""
super(SemanticSettings, self).__init__(**kwargs)
self.configurations = kwargs.get('configurations', None)
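# Usage sketch (illustrative only, not part of the generated models): semantic
# settings with one configuration. The keyword names on PrioritizedFields
# (title_field, prioritized_content_fields) are assumed to follow the naming
# pattern used elsewhere in this module; the field names are example values.
#
#     semantic_settings = SemanticSettings(
#         configurations=[
#             SemanticConfiguration(
#                 name="default",
#                 prioritized_fields=PrioritizedFields(
#                     title_field=SemanticField(field_name="title"),
#                     prioritized_content_fields=[SemanticField(field_name="content")],
#                 ),
#             )
#         ],
#     )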
class SentimentSkill(SearchIndexerSkill):
"""Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT",
"ru", "es", "sv", "tr".
:vartype default_language_code: str or
~azure.search.documents.indexes.models.SentimentSkillLanguage
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT",
"ru", "es", "sv", "tr".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.SentimentSkillLanguage
"""
super(SentimentSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
class SentimentSkillV3(SearchIndexerSkill):
"""Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
:vartype default_language_code: str
:ivar include_opinion_mining: If set to true, the skill output will include information from
Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated
assessment (adjective) in the text. Default is false.
:vartype include_opinion_mining: bool
:ivar model_version: The version of the model to use when calling the Text Analytics service.
It will default to the latest available when not specified. We recommend you do not specify
this value unless absolutely necessary.
:vartype model_version: str
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'include_opinion_mining': {'key': 'includeOpinionMining', 'type': 'bool'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
:paramtype default_language_code: str
:keyword include_opinion_mining: If set to true, the skill output will include information from
Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated
assessment (adjective) in the text. Default is false.
:paramtype include_opinion_mining: bool
:keyword model_version: The version of the model to use when calling the Text Analytics
service. It will default to the latest available when not specified. We recommend you do not
specify this value unless absolutely necessary.
:paramtype model_version: str
"""
super(SentimentSkillV3, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.SentimentSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.include_opinion_mining = kwargs.get('include_opinion_mining', False)
self.model_version = kwargs.get('model_version', None)
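# Usage sketch (illustrative only, not part of the generated models): a V3
# sentiment skill with opinion mining enabled. The input/output names and
# source path are hypothetical example values.
#
#     sentiment_skill = SentimentSkillV3(
#         context="/document",
#         default_language_code="en",
#         include_opinion_mining=True,
#         inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
#         outputs=[OutputFieldMappingEntry(name="sentiment", target_name="sentimentLabel")],
#     )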
class ServiceCounters(msrest.serialization.Model):
"""Represents service-level resource counters and quotas.
All required parameters must be populated in order to send to Azure.
:ivar document_counter: Required. Total number of documents across all indexes in the service.
:vartype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
:ivar index_counter: Required. Total number of indexes.
:vartype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
:ivar indexer_counter: Required. Total number of indexers.
:vartype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
:ivar data_source_counter: Required. Total number of data sources.
:vartype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
:ivar storage_size_counter: Required. Total size of used storage in bytes.
:vartype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
:ivar synonym_map_counter: Required. Total number of synonym maps.
:vartype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
:ivar skillset_counter: Total number of skillsets.
:vartype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
"""
_validation = {
'document_counter': {'required': True},
'index_counter': {'required': True},
'indexer_counter': {'required': True},
'data_source_counter': {'required': True},
'storage_size_counter': {'required': True},
'synonym_map_counter': {'required': True},
}
_attribute_map = {
'document_counter': {'key': 'documentCount', 'type': 'ResourceCounter'},
'index_counter': {'key': 'indexesCount', 'type': 'ResourceCounter'},
'indexer_counter': {'key': 'indexersCount', 'type': 'ResourceCounter'},
'data_source_counter': {'key': 'dataSourcesCount', 'type': 'ResourceCounter'},
'storage_size_counter': {'key': 'storageSize', 'type': 'ResourceCounter'},
'synonym_map_counter': {'key': 'synonymMaps', 'type': 'ResourceCounter'},
'skillset_counter': {'key': 'skillsetCount', 'type': 'ResourceCounter'},
}
def __init__(
self,
**kwargs
):
"""
:keyword document_counter: Required. Total number of documents across all indexes in the
service.
:paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
:keyword index_counter: Required. Total number of indexes.
:paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
:keyword indexer_counter: Required. Total number of indexers.
:paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
:keyword data_source_counter: Required. Total number of data sources.
:paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
:keyword storage_size_counter: Required. Total size of used storage in bytes.
:paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
:keyword synonym_map_counter: Required. Total number of synonym maps.
:paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
:keyword skillset_counter: Total number of skillsets.
:paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
"""
super(ServiceCounters, self).__init__(**kwargs)
self.document_counter = kwargs['document_counter']
self.index_counter = kwargs['index_counter']
self.indexer_counter = kwargs['indexer_counter']
self.data_source_counter = kwargs['data_source_counter']
self.storage_size_counter = kwargs['storage_size_counter']
self.synonym_map_counter = kwargs['synonym_map_counter']
self.skillset_counter = kwargs.get('skillset_counter', None)
class ServiceLimits(msrest.serialization.Model):
"""Represents various service level limits.
:ivar max_fields_per_index: The maximum allowed fields per index.
:vartype max_fields_per_index: int
:ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an
index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
:vartype max_field_nesting_depth_per_index: int
:ivar max_complex_collection_fields_per_index: The maximum number of fields of type
Collection(Edm.ComplexType) allowed in an index.
:vartype max_complex_collection_fields_per_index: int
:ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex
collections allowed per document.
:vartype max_complex_objects_in_collections_per_document: int
"""
_attribute_map = {
'max_fields_per_index': {'key': 'maxFieldsPerIndex', 'type': 'int'},
'max_field_nesting_depth_per_index': {'key': 'maxFieldNestingDepthPerIndex', 'type': 'int'},
'max_complex_collection_fields_per_index': {'key': 'maxComplexCollectionFieldsPerIndex', 'type': 'int'},
'max_complex_objects_in_collections_per_document': {'key': 'maxComplexObjectsInCollectionsPerDocument', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword max_fields_per_index: The maximum allowed fields per index.
:paramtype max_fields_per_index: int
:keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in
an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
:paramtype max_field_nesting_depth_per_index: int
:keyword max_complex_collection_fields_per_index: The maximum number of fields of type
Collection(Edm.ComplexType) allowed in an index.
:paramtype max_complex_collection_fields_per_index: int
:keyword max_complex_objects_in_collections_per_document: The maximum number of objects in
complex collections allowed per document.
:paramtype max_complex_objects_in_collections_per_document: int
"""
super(ServiceLimits, self).__init__(**kwargs)
self.max_fields_per_index = kwargs.get('max_fields_per_index', None)
self.max_field_nesting_depth_per_index = kwargs.get('max_field_nesting_depth_per_index', None)
self.max_complex_collection_fields_per_index = kwargs.get('max_complex_collection_fields_per_index', None)
self.max_complex_objects_in_collections_per_document = kwargs.get('max_complex_objects_in_collections_per_document', None)
class ServiceStatistics(msrest.serialization.Model):
"""Response from a get service statistics request. If successful, it includes service level counters and limits.
All required parameters must be populated in order to send to Azure.
:ivar counters: Required. Service level resource counters.
:vartype counters: ~azure.search.documents.indexes.models.ServiceCounters
:ivar limits: Required. Service level general limits.
:vartype limits: ~azure.search.documents.indexes.models.ServiceLimits
"""
_validation = {
'counters': {'required': True},
'limits': {'required': True},
}
_attribute_map = {
'counters': {'key': 'counters', 'type': 'ServiceCounters'},
'limits': {'key': 'limits', 'type': 'ServiceLimits'},
}
def __init__(
self,
**kwargs
):
"""
:keyword counters: Required. Service level resource counters.
:paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters
:keyword limits: Required. Service level general limits.
:paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits
"""
super(ServiceStatistics, self).__init__(**kwargs)
self.counters = kwargs['counters']
self.limits = kwargs['limits']
class ShaperSkill(SearchIndexerSkill):
"""A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields).
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
super(ShaperSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' # type: str
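# Usage sketch (illustrative only, not part of the generated models): reshaping
# two source fields into a single complex output. The field names and target
# name are hypothetical example values.
#
#     shaper_skill = ShaperSkill(
#         context="/document",
#         inputs=[
#             InputFieldMappingEntry(name="firstName", source="/document/firstName"),
#             InputFieldMappingEntry(name="lastName", source="/document/lastName"),
#         ],
#         outputs=[OutputFieldMappingEntry(name="output", target_name="fullName")],
#     )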
class ShingleTokenFilter(TokenFilter):
"""Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2.
:vartype max_shingle_size: int
:ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less
than the value of maxShingleSize.
:vartype min_shingle_size: int
:ivar output_unigrams: A value indicating whether the output stream will contain the input
tokens (unigrams) as well as shingles. Default is true.
:vartype output_unigrams: bool
:ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those
times when no shingles are available. This property takes precedence when outputUnigrams is set
to false. Default is false.
:vartype output_unigrams_if_no_shingles: bool
:ivar token_separator: The string to use when joining adjacent tokens to form a shingle.
Default is a single space (" ").
:vartype token_separator: str
:ivar filter_token: The string to insert for each position at which there is no token. Default
is an underscore ("_").
:vartype filter_token: str
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_shingle_size': {'minimum': 2},
'min_shingle_size': {'minimum': 2},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_shingle_size': {'key': 'maxShingleSize', 'type': 'int'},
'min_shingle_size': {'key': 'minShingleSize', 'type': 'int'},
'output_unigrams': {'key': 'outputUnigrams', 'type': 'bool'},
'output_unigrams_if_no_shingles': {'key': 'outputUnigramsIfNoShingles', 'type': 'bool'},
'token_separator': {'key': 'tokenSeparator', 'type': 'str'},
'filter_token': {'key': 'filterToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2.
:paramtype max_shingle_size: int
:keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be
less than the value of maxShingleSize.
:paramtype min_shingle_size: int
:keyword output_unigrams: A value indicating whether the output stream will contain the input
tokens (unigrams) as well as shingles. Default is true.
:paramtype output_unigrams: bool
:keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for
those times when no shingles are available. This property takes precedence when outputUnigrams
is set to false. Default is false.
:paramtype output_unigrams_if_no_shingles: bool
:keyword token_separator: The string to use when joining adjacent tokens to form a shingle.
Default is a single space (" ").
:paramtype token_separator: str
:keyword filter_token: The string to insert for each position at which there is no token.
Default is an underscore ("_").
:paramtype filter_token: str
"""
super(ShingleTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' # type: str
self.max_shingle_size = kwargs.get('max_shingle_size', 2)
self.min_shingle_size = kwargs.get('min_shingle_size', 2)
self.output_unigrams = kwargs.get('output_unigrams', True)
self.output_unigrams_if_no_shingles = kwargs.get('output_unigrams_if_no_shingles', False)
self.token_separator = kwargs.get('token_separator', " ")
self.filter_token = kwargs.get('filter_token', "_")
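# Usage sketch (illustrative only, not part of the generated models): a shingle
# filter that emits two- and three-token shingles without unigrams. The filter
# name is a hypothetical example value.
#
#     shingle_filter = ShingleTokenFilter(
#         name="my_shingle_filter",
#         min_shingle_size=2,
#         max_shingle_size=3,
#         output_unigrams=False,
#     )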
class SkillNames(msrest.serialization.Model):
"""SkillNames.
:ivar skill_names: The names of skills to be reset.
:vartype skill_names: list[str]
"""
_attribute_map = {
'skill_names': {'key': 'skillNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword skill_names: The names of skills to be reset.
:paramtype skill_names: list[str]
"""
super(SkillNames, self).__init__(**kwargs)
self.skill_names = kwargs.get('skill_names', None)
class SnowballTokenFilter(TokenFilter):
"""A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar language: Required. The language to use. Possible values include: "armenian", "basque",
"catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian",
"italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian",
"spanish", "swedish", "turkish".
:vartype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'language': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'language': {'key': 'language', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword language: Required. The language to use. Possible values include: "armenian",
"basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2",
"hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian",
"russian", "spanish", "swedish", "turkish".
:paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
"""
super(SnowballTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' # type: str
self.language = kwargs['language']
class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy):
"""Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the data deletion detection
policy. Constant filled by server.
:vartype odata_type: str
:ivar soft_delete_column_name: The name of the column to use for soft-deletion detection.
:vartype soft_delete_column_name: str
:ivar soft_delete_marker_value: The marker value that identifies an item as deleted.
:vartype soft_delete_marker_value: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'soft_delete_column_name': {'key': 'softDeleteColumnName', 'type': 'str'},
'soft_delete_marker_value': {'key': 'softDeleteMarkerValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword soft_delete_column_name: The name of the column to use for soft-deletion detection.
:paramtype soft_delete_column_name: str
:keyword soft_delete_marker_value: The marker value that identifies an item as deleted.
:paramtype soft_delete_marker_value: str
"""
super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' # type: str
self.soft_delete_column_name = kwargs.get('soft_delete_column_name', None)
self.soft_delete_marker_value = kwargs.get('soft_delete_marker_value', None)
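# Usage sketch (illustrative only, not part of the generated models): flag rows
# as deleted when the "IsDeleted" column equals "true". Both values are
# hypothetical examples of a soft-delete column and marker.
#
#     deletion_policy = SoftDeleteColumnDeletionDetectionPolicy(
#         soft_delete_column_name="IsDeleted",
#         soft_delete_marker_value="true",
#     )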
class SplitSkill(SearchIndexerSkill):
"""A skill to split a string into chunks of text.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
:vartype default_language_code: str or
~azure.search.documents.indexes.models.SplitSkillLanguage
:ivar text_split_mode: A value indicating which split mode to perform. Possible values include:
"pages", "sentences".
:vartype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
:ivar maximum_page_length: The desired maximum page length. Default is 10000.
:vartype maximum_page_length: int
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
'text_split_mode': {'key': 'textSplitMode', 'type': 'str'},
'maximum_page_length': {'key': 'maximumPageLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
:paramtype default_language_code: str or
~azure.search.documents.indexes.models.SplitSkillLanguage
:keyword text_split_mode: A value indicating which split mode to perform. Possible values
include: "pages", "sentences".
:paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
:keyword maximum_page_length: The desired maximum page length. Default is 10000.
:paramtype maximum_page_length: int
"""
super(SplitSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.SplitSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
self.text_split_mode = kwargs.get('text_split_mode', None)
self.maximum_page_length = kwargs.get('maximum_page_length', None)
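# Usage sketch (illustrative only, not part of the generated models): splitting
# document content into pages of at most 4000 characters. The input/output
# names and page length are hypothetical example values.
#
#     split_skill = SplitSkill(
#         text_split_mode="pages",
#         maximum_page_length=4000,
#         inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
#         outputs=[OutputFieldMappingEntry(name="textItems", target_name="pages")],
#     )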
class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy):
"""Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the data change detection
policy. Constant filled by server.
:vartype odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' # type: str
class StemmerOverrideTokenFilter(TokenFilter):
"""Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar rules: Required. A list of stemming rules in the following format: "word => stem", for
example: "ran => run".
:vartype rules: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'rules': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'rules': {'key': 'rules', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword rules: Required. A list of stemming rules in the following format: "word => stem", for
example: "ran => run".
:paramtype rules: list[str]
"""
super(StemmerOverrideTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' # type: str
self.rules = kwargs['rules']
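# Usage sketch (illustrative only, not part of the generated models): protecting
# a few custom stems before other stemming filters run. The filter name and
# rules are hypothetical example values in the documented "word => stem" format.
#
#     stemmer_override = StemmerOverrideTokenFilter(
#         name="custom_stems",
#         rules=["ran => run", "mice => mouse"],
#     )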
class StemmerTokenFilter(TokenFilter):
"""Language specific stemming filter. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar language: Required. The language to use. Possible values include: "arabic", "armenian",
"basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp",
"english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins",
"finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
"minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi",
"hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani",
"latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk",
"portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian",
"lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish".
:vartype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'language': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'language': {'key': 'language', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword language: Required. The language to use. Possible values include: "arabic",
"armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch",
"dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2",
"lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
"minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi",
"hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani",
"latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk",
"portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian",
"lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish".
:paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
"""
super(StemmerTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' # type: str
self.language = kwargs['language']
class StopAnalyzer(LexicalAnalyzer):
"""Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the analyzer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar stopwords: A list of stopwords.
:vartype stopwords: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'stopwords': {'key': 'stopwords', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the analyzer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword stopwords: A list of stopwords.
:paramtype stopwords: list[str]
"""
super(StopAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' # type: str
self.stopwords = kwargs.get('stopwords', None)
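# Illustrative usage sketch (added commentary, not part of the generated SDK code):
# constructing the analyzer defined above with a custom stopword list. The analyzer
# name and stopwords below are hypothetical examples.
def _example_stop_analyzer():
    return StopAnalyzer(name="my-stop-analyzer", stopwords=["the", "and", "a"])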
class StopwordsTokenFilter(TokenFilter):
"""Removes stop words from a token stream. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar stopwords: The list of stopwords. This property and the stopwords list property cannot
both be set.
:vartype stopwords: list[str]
:ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords
property cannot both be set. Default is English. Possible values include: "arabic", "armenian",
"basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english",
"finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian",
"irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian",
"sorani", "spanish", "swedish", "thai", "turkish".
:vartype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
:ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted
to lower case first. Default is false.
:vartype ignore_case: bool
:ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if
it's a stop word. Default is true.
:vartype remove_trailing_stop_words: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'stopwords': {'key': 'stopwords', 'type': '[str]'},
'stopwords_list': {'key': 'stopwordsList', 'type': 'str'},
'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
'remove_trailing_stop_words': {'key': 'removeTrailing', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword stopwords: The list of stopwords. This property and the stopwords list property cannot
both be set.
:paramtype stopwords: list[str]
:keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords
property cannot both be set. Default is English. Possible values include: "arabic", "armenian",
"basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english",
"finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian",
"irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian",
"sorani", "spanish", "swedish", "thai", "turkish".
:paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
:keyword ignore_case: A value indicating whether to ignore case. If true, all words are
converted to lower case first. Default is false.
:paramtype ignore_case: bool
:keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term
if it's a stop word. Default is true.
:paramtype remove_trailing_stop_words: bool
"""
super(StopwordsTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' # type: str
self.stopwords = kwargs.get('stopwords', None)
self.stopwords_list = kwargs.get('stopwords_list', None)
self.ignore_case = kwargs.get('ignore_case', False)
self.remove_trailing_stop_words = kwargs.get('remove_trailing_stop_words', True)
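# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# Per the docstring above, "stopwords" and "stopwords_list" are mutually exclusive;
# this example uses the predefined English list. The filter name is a hypothetical example.
def _example_stopwords_token_filter():
    return StopwordsTokenFilter(
        name="my-stopwords-filter",
        stopwords_list="english",
        ignore_case=True,
    )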
class Suggester(msrest.serialization.Model):
"""Defines how the Suggest API should apply to a group of fields in the index.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the suggester.
:vartype name: str
:ivar search_mode: A value indicating the capabilities of the suggester. Has constant value:
"analyzingInfixMatching".
:vartype search_mode: str
:ivar source_fields: Required. The list of field names to which the suggester applies. Each
field must be searchable.
:vartype source_fields: list[str]
"""
_validation = {
'name': {'required': True},
'search_mode': {'required': True, 'constant': True},
'source_fields': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'search_mode': {'key': 'searchMode', 'type': 'str'},
'source_fields': {'key': 'sourceFields', 'type': '[str]'},
}
search_mode = "analyzingInfixMatching"
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the suggester.
:paramtype name: str
:keyword source_fields: Required. The list of field names to which the suggester applies. Each
field must be searchable.
:paramtype source_fields: list[str]
"""
super(Suggester, self).__init__(**kwargs)
self.name = kwargs['name']
self.source_fields = kwargs['source_fields']
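# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# The suggester and field names below are hypothetical; each source field must be searchable.
def _example_suggester():
    return Suggester(name="sg", source_fields=["hotelName", "description"])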
class SynonymMap(msrest.serialization.Model):
"""Represents a synonym map definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the synonym map.
:vartype name: str
:ivar format: The format of the synonym map. Only the 'solr' format is currently supported. Has
constant value: "solr".
:vartype format: str
:ivar synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
:vartype synonyms: str
:ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
needed if you want to rotate your encryption key; your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:ivar e_tag: The ETag of the synonym map.
:vartype e_tag: str
"""
_validation = {
'name': {'required': True},
'format': {'required': True, 'constant': True},
'synonyms': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'format': {'key': 'format', 'type': 'str'},
'synonyms': {'key': 'synonyms', 'type': 'str'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
format = "solr"
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the synonym map.
:paramtype name: str
:keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
:paramtype synonyms: str
:keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
needed if you want to rotate your encryption key; your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:keyword e_tag: The ETag of the synonym map.
:paramtype e_tag: str
"""
super(SynonymMap, self).__init__(**kwargs)
self.name = kwargs['name']
self.synonyms = kwargs['synonyms']
self.encryption_key = kwargs.get('encryption_key', None)
self.e_tag = kwargs.get('e_tag', None)
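# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# "synonyms" is a single newline-separated string of rules in the 'solr' format noted
# above; the map name and rules below are hypothetical examples.
def _example_synonym_map():
    return SynonymMap(
        name="my-synonym-map",
        synonyms="USA, United States, United States of America\nWashington, Wash. => WA",
    )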
class SynonymTokenFilter(TokenFilter):
"""Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar synonyms: Required. A list of synonyms in one of the following two formats: 1. incredible,
unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced
with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma
separated list of equivalent words. Set the expand option to change how this list is
interpreted.
:vartype synonyms: list[str]
:ivar ignore_case: A value indicating whether to case-fold input for matching. Default is
false.
:vartype ignore_case: bool
:ivar expand: A value indicating whether all words in the list of synonyms (if => notation is
not used) will map to one another. If true, the list: incredible, unbelievable, fabulous,
amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
unbelievable, fabulous, amazing. If false, the same list is equivalent to: incredible,
unbelievable, fabulous, amazing => incredible. Default is true.
:vartype expand: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'synonyms': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'synonyms': {'key': 'synonyms', 'type': '[str]'},
'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
'expand': {'key': 'expand', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword synonyms: Required. A list of synonyms in one of the following two formats: 1. incredible,
unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced
with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma
separated list of equivalent words. Set the expand option to change how this list is
interpreted.
:paramtype synonyms: list[str]
:keyword ignore_case: A value indicating whether to case-fold input for matching. Default is
false.
:paramtype ignore_case: bool
:keyword expand: A value indicating whether all words in the list of synonyms (if => notation
is not used) will map to one another. If true, the list: incredible, unbelievable, fabulous,
amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
unbelievable, fabulous, amazing. If false, the same list is equivalent to: incredible,
unbelievable, fabulous, amazing => incredible. Default is true.
:paramtype expand: bool
"""
super(SynonymTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' # type: str
self.synonyms = kwargs['synonyms']
self.ignore_case = kwargs.get('ignore_case', False)
self.expand = kwargs.get('expand', True)
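# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# Uses the comma-separated "equivalent words" format described above with expand=True,
# so every listed term maps to every other. The filter name is a hypothetical example.
def _example_synonym_token_filter():
    return SynonymTokenFilter(
        name="my-synonym-filter",
        synonyms=["incredible, unbelievable, fabulous, amazing"],
        ignore_case=True,
        expand=True,
    )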
class TagScoringFunction(ScoringFunction):
"""Defines a function that boosts scores of documents with string values matching a given list of tags.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case. Constant filled by server.
:vartype type: str
:ivar field_name: Required. The name of the field used as input to the scoring function.
:vartype field_name: str
:ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
1.0.
:vartype boost: float
:ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:ivar parameters: Required. Parameter values for the tag scoring function.
:vartype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
"""
_validation = {
'type': {'required': True},
'field_name': {'required': True},
'boost': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'field_name': {'key': 'fieldName', 'type': 'str'},
'boost': {'key': 'boost', 'type': 'float'},
'interpolation': {'key': 'interpolation', 'type': 'str'},
'parameters': {'key': 'tag', 'type': 'TagScoringParameters'},
}
def __init__(
self,
**kwargs
):
"""
:keyword field_name: Required. The name of the field used as input to the scoring function.
:paramtype field_name: str
:keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
to 1.0.
:paramtype boost: float
:keyword interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
:paramtype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
:keyword parameters: Required. Parameter values for the tag scoring function.
:paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
"""
super(TagScoringFunction, self).__init__(**kwargs)
self.type = 'tag' # type: str
self.parameters = kwargs['parameters']
class TagScoringParameters(msrest.serialization.Model):
"""Provides parameter values to a tag scoring function.
All required parameters must be populated in order to send to Azure.
:ivar tags_parameter: Required. The name of the parameter passed in search queries to specify
the list of tags to compare against the target field.
:vartype tags_parameter: str
"""
_validation = {
'tags_parameter': {'required': True},
}
_attribute_map = {
'tags_parameter': {'key': 'tagsParameter', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword tags_parameter: Required. The name of the parameter passed in search queries to
specify the list of tags to compare against the target field.
:paramtype tags_parameter: str
"""
super(TagScoringParameters, self).__init__(**kwargs)
self.tags_parameter = kwargs['tags_parameter']
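# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# Combines the two models above: the scoring function boosts documents whose target field
# matches tags passed via the (hypothetical) "mytags" query parameter.
def _example_tag_scoring_function():
    return TagScoringFunction(
        field_name="tags",
        boost=2.0,
        parameters=TagScoringParameters(tags_parameter="mytags"),
    )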
class TextTranslationSkill(SearchIndexerSkill):
"""A skill to translate text from one language to another.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar default_to_language_code: Required. The language code to translate documents into for
documents that don't specify the to language explicitly. Possible values include: "af", "ar",
"bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
"fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
"tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
"pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
"ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
:vartype default_to_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
:ivar default_from_language_code: The language code to translate documents from for documents
that don't specify the from language explicitly. Possible values include: "af", "ar", "bn",
"bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil",
"fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh",
"tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br",
"pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta",
"te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
:vartype default_from_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
:ivar suggested_from: The language code to translate documents from when neither the
fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the
automatic language detection is unsuccessful. Default is en. Possible values include: "af",
"ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et",
"fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja",
"sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl",
"pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv",
"ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml",
"pa".
:vartype suggested_from: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
'default_to_language_code': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'default_to_language_code': {'key': 'defaultToLanguageCode', 'type': 'str'},
'default_from_language_code': {'key': 'defaultFromLanguageCode', 'type': 'str'},
'suggested_from': {'key': 'suggestedFrom', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword default_to_language_code: Required. The language code to translate documents into for
documents that don't specify the to language explicitly. Possible values include: "af", "ar",
"bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
"fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
"tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
"pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
"ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
:paramtype default_to_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
:keyword default_from_language_code: The language code to translate documents from for
documents that don't specify the from language explicitly. Possible values include: "af", "ar",
"bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
"fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
"tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
"pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
"ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
:paramtype default_from_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
:keyword suggested_from: The language code to translate documents from when neither the
fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the
automatic language detection is unsuccessful. Default is en. Possible values include: "af",
"ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et",
"fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja",
"sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl",
"pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv",
"ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml",
"pa".
:paramtype suggested_from: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
"""
super(TextTranslationSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' # type: str
self.default_to_language_code = kwargs['default_to_language_code']
self.default_from_language_code = kwargs.get('default_from_language_code', None)
self.suggested_from = kwargs.get('suggested_from', None)
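# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# Wires the skill defined above to translate document content into French; the input source,
# output target name, and language codes below are hypothetical example values.
def _example_text_translation_skill():
    return TextTranslationSkill(
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="translatedText", target_name="translated_text")],
        default_to_language_code="fr",
        default_from_language_code="en",
    )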
class TextWeights(msrest.serialization.Model):
"""Defines weights on index fields for which matches should boost scoring in search queries.
All required parameters must be populated in order to send to Azure.
:ivar weights: Required. The dictionary of per-field weights to boost document scoring. The
keys are field names and the values are the weights for each field.
:vartype weights: dict[str, float]
"""
_validation = {
'weights': {'required': True},
}
_attribute_map = {
'weights': {'key': 'weights', 'type': '{float}'},
}
def __init__(
self,
**kwargs
):
"""
:keyword weights: Required. The dictionary of per-field weights to boost document scoring. The
keys are field names and the values are the weights for each field.
:paramtype weights: dict[str, float]
"""
super(TextWeights, self).__init__(**kwargs)
self.weights = kwargs['weights']
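# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# The field names and weight values below are hypothetical examples.
def _example_text_weights():
    # Matches in "hotelName" count twice the default; "description" slightly more than default.
    return TextWeights(weights={"hotelName": 2.0, "description": 1.5})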
class TruncateTokenFilter(TokenFilter):
"""Truncates the terms to a specific length. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar length: The length at which terms will be truncated. Default and maximum is 300.
:vartype length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'length': {'key': 'length', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword length: The length at which terms will be truncated. Default and maximum is 300.
:paramtype length: int
"""
super(TruncateTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' # type: str
self.length = kwargs.get('length', 300)
class UaxUrlEmailTokenizer(LexicalTokenizer):
"""Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:vartype name: str
:ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:vartype max_token_length: int
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'max_token_length': {'maximum': 300},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
:paramtype max_token_length: int
"""
super(UaxUrlEmailTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
class UniqueTokenFilter(TokenFilter):
"""Filters out tokens with same text as the previous token. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar only_on_same_position: A value indicating whether to remove duplicates only at the same
position. Default is false.
:vartype only_on_same_position: bool
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'only_on_same_position': {'key': 'onlyOnSamePosition', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword only_on_same_position: A value indicating whether to remove duplicates only at the
same position. Default is false.
:paramtype only_on_same_position: bool
"""
super(UniqueTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' # type: str
self.only_on_same_position = kwargs.get('only_on_same_position', False)
class WebApiSkill(SearchIndexerSkill):
"""A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
:vartype odata_type: str
:ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:vartype name: str
:ivar description: The description of the skill which describes the inputs, outputs, and usage
of the skill.
:vartype description: str
:ivar context: Represents the level at which operations take place, such as the document root
or document content (for example, /document or /document/content). The default is /document.
:vartype context: str
:ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
output of an upstream skill.
:vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:ivar outputs: Required. The output of a skill is either a field in a search index, or a value
that can be consumed as an input by another skill.
:vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:ivar uri: Required. The url for the Web API.
:vartype uri: str
:ivar http_headers: The headers required to make the http request.
:vartype http_headers: dict[str, str]
:ivar http_method: The method for the http request.
:vartype http_method: str
:ivar timeout: The desired timeout for the request. Default is 30 seconds.
:vartype timeout: ~datetime.timedelta
:ivar batch_size: The desired batch size, which indicates the number of documents.
:vartype batch_size: int
:ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web
API.
:vartype degree_of_parallelism: int
"""
_validation = {
'odata_type': {'required': True},
'inputs': {'required': True},
'outputs': {'required': True},
'uri': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'context': {'key': 'context', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
'uri': {'key': 'uri', 'type': 'str'},
'http_headers': {'key': 'httpHeaders', 'type': '{str}'},
'http_method': {'key': 'httpMethod', 'type': 'str'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'batch_size': {'key': 'batchSize', 'type': 'int'},
'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
:paramtype name: str
:keyword description: The description of the skill which describes the inputs, outputs, and
usage of the skill.
:paramtype description: str
:keyword context: Represents the level at which operations take place, such as the document
root or document content (for example, /document or /document/content). The default is
/document.
:paramtype context: str
:keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
the output of an upstream skill.
:paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
:keyword outputs: Required. The output of a skill is either a field in a search index, or a
value that can be consumed as an input by another skill.
:paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
:keyword uri: Required. The url for the Web API.
:paramtype uri: str
:keyword http_headers: The headers required to make the http request.
:paramtype http_headers: dict[str, str]
:keyword http_method: The method for the http request.
:paramtype http_method: str
:keyword timeout: The desired timeout for the request. Default is 30 seconds.
:paramtype timeout: ~datetime.timedelta
:keyword batch_size: The desired batch size, which indicates the number of documents.
:paramtype batch_size: int
:keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the
Web API.
:paramtype degree_of_parallelism: int
"""
super(WebApiSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' # type: str
self.uri = kwargs['uri']
self.http_headers = kwargs.get('http_headers', None)
self.http_method = kwargs.get('http_method', None)
self.timeout = kwargs.get('timeout', None)
self.batch_size = kwargs.get('batch_size', None)
self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None)
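# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# Calls a hypothetical custom endpoint with batched documents; the URI, header, field
# mappings, and sizes below are example values only.
def _example_web_api_skill():
    return WebApiSkill(
        uri="https://example.com/api/enrich",
        http_method="POST",
        http_headers={"Ocp-Apim-Subscription-Key": "<key>"},
        batch_size=10,
        degree_of_parallelism=2,
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="enriched", target_name="enriched_text")],
    )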
class WordDelimiterTokenFilter(TokenFilter):
"""Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:ivar odata_type: Required. Identifies the concrete type of the token filter. Constant filled by
server.
:vartype odata_type: str
:ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:vartype name: str
:ivar generate_word_parts: A value indicating whether to generate part words. If set, causes
parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
true.
:vartype generate_word_parts: bool
:ivar generate_number_parts: A value indicating whether to generate number subwords. Default is
true.
:vartype generate_number_parts: bool
:ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated.
For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false.
:vartype catenate_words: bool
:ivar catenate_numbers: A value indicating whether maximum runs of number parts will be
catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
:vartype catenate_numbers: bool
:ivar catenate_all: A value indicating whether all subword parts will be catenated. For
example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
:vartype catenate_all: bool
:ivar split_on_case_change: A value indicating whether to split words on caseChange. For
example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
:vartype split_on_case_change: bool
:ivar preserve_original: A value indicating whether original words will be preserved and added
to the subword list. Default is false.
:vartype preserve_original: bool
:ivar split_on_numerics: A value indicating whether to split on numbers. For example, if this
is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
:vartype split_on_numerics: bool
:ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each
subword. Default is true.
:vartype stem_english_possessive: bool
:ivar protected_words: A list of tokens to protect from being delimited.
:vartype protected_words: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'generate_word_parts': {'key': 'generateWordParts', 'type': 'bool'},
'generate_number_parts': {'key': 'generateNumberParts', 'type': 'bool'},
'catenate_words': {'key': 'catenateWords', 'type': 'bool'},
'catenate_numbers': {'key': 'catenateNumbers', 'type': 'bool'},
'catenate_all': {'key': 'catenateAll', 'type': 'bool'},
'split_on_case_change': {'key': 'splitOnCaseChange', 'type': 'bool'},
'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
'split_on_numerics': {'key': 'splitOnNumerics', 'type': 'bool'},
'stem_english_possessive': {'key': 'stemEnglishPossessive', 'type': 'bool'},
'protected_words': {'key': 'protectedWords', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
:paramtype name: str
:keyword generate_word_parts: A value indicating whether to generate part words. If set, causes
parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
true.
:paramtype generate_word_parts: bool
:keyword generate_number_parts: A value indicating whether to generate number subwords. Default
is true.
:paramtype generate_number_parts: bool
:keyword catenate_words: A value indicating whether maximum runs of word parts will be
catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default
is false.
:paramtype catenate_words: bool
:keyword catenate_numbers: A value indicating whether maximum runs of number parts will be
catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
:paramtype catenate_numbers: bool
:keyword catenate_all: A value indicating whether all subword parts will be catenated. For
example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
:paramtype catenate_all: bool
:keyword split_on_case_change: A value indicating whether to split words on caseChange. For
example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
:paramtype split_on_case_change: bool
:keyword preserve_original: A value indicating whether original words will be preserved and
added to the subword list. Default is false.
:paramtype preserve_original: bool
:keyword split_on_numerics: A value indicating whether to split on numbers. For example, if
this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
:paramtype split_on_numerics: bool
:keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each
subword. Default is true.
:paramtype stem_english_possessive: bool
:keyword protected_words: A list of tokens to protect from being delimited.
:paramtype protected_words: list[str]
"""
super(WordDelimiterTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' # type: str
self.generate_word_parts = kwargs.get('generate_word_parts', True)
self.generate_number_parts = kwargs.get('generate_number_parts', True)
self.catenate_words = kwargs.get('catenate_words', False)
self.catenate_numbers = kwargs.get('catenate_numbers', False)
self.catenate_all = kwargs.get('catenate_all', False)
self.split_on_case_change = kwargs.get('split_on_case_change', True)
self.preserve_original = kwargs.get('preserve_original', False)
self.split_on_numerics = kwargs.get('split_on_numerics', True)
self.stem_english_possessive = kwargs.get('stem_english_possessive', True)
self.protected_words = kwargs.get('protected_words', None)
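# Illustrative usage sketch (added commentary, not part of the generated SDK code).
# Keeps the original token while also catenating word parts, and protects a few tokens
# from being split; the filter name and protected words below are hypothetical examples.
def _example_word_delimiter_token_filter():
    return WordDelimiterTokenFilter(
        name="my-word-delimiter",
        preserve_original=True,
        catenate_words=True,
        protected_words=["iPhone", "SQL2019"],
    )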
|
py | b4069f6f48a343a62755e3319cf1fdf6ad6b404e | import json
import logging
import multiprocessing
import os
from pathlib import Path
from typing import Optional, Tuple, List
import psutil as ps
import re
import shutil
import subprocess
import tempfile
import time
import traceback
from datetime import datetime, timezone
from ddtrace import tracer
from ddtrace.ext import SpanTypes
from django.conf import settings
from usaspending_api.awards.v2.filters.filter_helpers import add_date_range_comparison_types
from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, assistance_type_mapping, idv_type_mapping
from usaspending_api.common.csv_helpers import count_rows_in_delimited_file, partition_large_delimited_file
from usaspending_api.common.exceptions import InvalidParameterException
from usaspending_api.common.helpers.orm_helpers import generate_raw_quoted_query
from usaspending_api.common.helpers.s3_helpers import multipart_upload
from usaspending_api.common.helpers.text_helpers import slugify_text_for_file_names
from usaspending_api.common.retrieve_file_from_uri import RetrieveFileFromUri
from usaspending_api.common.tracing import SubprocessTrace
from usaspending_api.download.download_utils import construct_data_date_range
from usaspending_api.download.filestreaming import NAMING_CONFLICT_DISCRIMINATOR
from usaspending_api.download.filestreaming.download_source import DownloadSource
from usaspending_api.download.filestreaming.file_description import build_file_description, save_file_description
from usaspending_api.download.filestreaming.zip_file import append_files_to_zip_file
from usaspending_api.download.helpers import verify_requested_columns_available, write_to_download_log as write_to_log
from usaspending_api.download.lookups import JOB_STATUS_DICT, VALUE_MAPPINGS, FILE_FORMATS
from usaspending_api.download.models import DownloadJob
DOWNLOAD_VISIBILITY_TIMEOUT = 60 * 10
MAX_VISIBILITY_TIMEOUT = 60 * 60 * settings.DOWNLOAD_DB_TIMEOUT_IN_HOURS
EXCEL_ROW_LIMIT = 1000000
WAIT_FOR_PROCESS_SLEEP = 5
JOB_TYPE = "USAspendingDownloader"
logger = logging.getLogger(__name__)
def generate_download(download_job: DownloadJob, origination: Optional[str] = None):
"""Create data archive files from the download job object"""
# Parse data from download_job
json_request = json.loads(download_job.json_request)
columns = json_request.get("columns", None)
limit = json_request.get("limit", None)
piid = json_request.get("piid", None)
award_id = json_request.get("award_id")
assistance_id = json_request.get("assistance_id")
file_format = json_request.get("file_format")
request_type = json_request.get("request_type")
span = tracer.current_span()
if span and request_type:
span.resource = request_type
file_name = start_download(download_job)
working_dir = None
try:
# Create temporary files and working directory
zip_file_path = settings.CSV_LOCAL_PATH + file_name
if not settings.IS_LOCAL and os.path.exists(zip_file_path):
# Clean up a zip file that might exist from a prior attempt at this download
os.remove(zip_file_path)
working_dir = os.path.splitext(zip_file_path)[0]
if not os.path.exists(working_dir):
os.mkdir(working_dir)
write_to_log(message=f"Generating {file_name}", download_job=download_job)
# Generate sources from the JSON request object
sources = get_download_sources(json_request, origination)
for source in sources:
# Parse and write data to the file; if there are no matching columns for a source then add an empty file
source_column_count = len(source.columns(columns))
if source_column_count == 0:
create_empty_data_file(
source, download_job, working_dir, piid, assistance_id, zip_file_path, file_format
)
else:
download_job.number_of_columns += source_column_count
parse_source(
source, columns, download_job, working_dir, piid, assistance_id, zip_file_path, limit, file_format
)
include_data_dictionary = json_request.get("include_data_dictionary")
if include_data_dictionary:
add_data_dictionary_to_zip(working_dir, zip_file_path)
include_file_description = json_request.get("include_file_description")
if include_file_description:
write_to_log(message="Adding file description to zip file")
file_description = build_file_description(include_file_description["source"], sources)
file_description = file_description.replace("[AWARD_ID]", str(award_id))
file_description_path = save_file_description(
working_dir, include_file_description["destination"], file_description
)
append_files_to_zip_file([file_description_path], zip_file_path)
download_job.file_size = os.stat(zip_file_path).st_size
except InvalidParameterException as e:
exc_msg = "InvalidParameterException was raised while attempting to process the DownloadJob"
fail_download(download_job, e, exc_msg)
raise InvalidParameterException(e)
except Exception as e:
# Set error message; job_status_id will be set in download_sqs_worker.handle()
exc_msg = "An exception was raised while attempting to process the DownloadJob"
fail_download(download_job, e, exc_msg)
raise Exception(download_job.error_message) from e
finally:
# Remove working directory
if working_dir and os.path.exists(working_dir):
shutil.rmtree(working_dir)
_kill_spawned_processes(download_job)
# push file to S3 bucket, if not local
if not settings.IS_LOCAL:
with tracer.trace(
name=f"job.{JOB_TYPE}.download.s3",
service="bulk-download",
resource=f"s3://{settings.BULK_DOWNLOAD_S3_BUCKET_NAME}",
span_type=SpanTypes.WORKER,
) as span, tracer.trace(
name="s3.command",
service="aws.s3",
resource=".".join(
[multipart_upload.__module__, (multipart_upload.__qualname__ or multipart_upload.__name__)]
),
span_type=SpanTypes.WEB,
) as s3_span:
# NOTE: Traces still not auto-picking-up aws.s3 service upload activity
# Could be that the patches for boto and botocore don't cover the newer boto3 S3Transfer upload approach
span.set_tag("file_name", file_name)
try:
bucket = settings.BULK_DOWNLOAD_S3_BUCKET_NAME
region = settings.USASPENDING_AWS_REGION
s3_span.set_tags({"bucket": bucket, "region": region, "file": zip_file_path})
start_uploading = time.perf_counter()
multipart_upload(bucket, region, zip_file_path, os.path.basename(zip_file_path))
write_to_log(
message=f"Uploading took {time.perf_counter() - start_uploading:.2f}s", download_job=download_job
)
except Exception as e:
# Set error message; job_status_id will be set in download_sqs_worker.handle()
exc_msg = "An exception was raised while attempting to upload the file"
fail_download(download_job, e, exc_msg)
if isinstance(e, InvalidParameterException):
raise InvalidParameterException(e)
else:
raise Exception(download_job.error_message) from e
finally:
# Remove generated file
if os.path.exists(zip_file_path):
os.remove(zip_file_path)
_kill_spawned_processes(download_job)
return finish_download(download_job)
def get_download_sources(json_request: dict, origination: Optional[str] = None):
download_sources = []
for download_type in json_request["download_types"]:
agency_id = json_request.get("agency", "all")
filter_function = VALUE_MAPPINGS[download_type]["filter_function"]
download_type_table = VALUE_MAPPINGS[download_type]["table"]
if VALUE_MAPPINGS[download_type]["source_type"] == "award":
# Award downloads
# Use correct date range columns for advanced search
# (Will not change anything for keyword search since "time_period" is not provided)
filters = add_date_range_comparison_types(
json_request["filters"],
is_subaward=download_type != "awards",
gte_date_type="action_date",
lte_date_type="date_signed",
)
queryset = filter_function(filters)
if filters.get("prime_and_sub_award_types") is not None:
award_type_codes = set(filters["prime_and_sub_award_types"][download_type])
else:
award_type_codes = set(filters["award_type_codes"])
if (
award_type_codes & (set(contract_type_mapping.keys()) | set(idv_type_mapping.keys()))
or "procurement" in award_type_codes
):
# only generate d1 files if the user is asking for contract data
d1_source = DownloadSource(
VALUE_MAPPINGS[download_type]["table_name"], "d1", download_type, agency_id, filters
)
d1_filters = {f"{VALUE_MAPPINGS[download_type]['contract_data']}__isnull": False}
d1_source.queryset = queryset & download_type_table.objects.filter(**d1_filters)
download_sources.append(d1_source)
if award_type_codes & set(assistance_type_mapping.keys()) or ("grant" in award_type_codes):
# only generate d2 files if the user is asking for assistance data
d2_source = DownloadSource(
VALUE_MAPPINGS[download_type]["table_name"], "d2", download_type, agency_id, filters
)
d2_filters = {f"{VALUE_MAPPINGS[download_type]['assistance_data']}__isnull": False}
d2_source.queryset = queryset & download_type_table.objects.filter(**d2_filters)
download_sources.append(d2_source)
elif VALUE_MAPPINGS[download_type]["source_type"] == "account":
# Account downloads
account_source = DownloadSource(
VALUE_MAPPINGS[download_type]["table_name"], json_request["account_level"], download_type, agency_id
)
account_source.queryset = filter_function(
download_type,
VALUE_MAPPINGS[download_type]["table"],
json_request["filters"],
json_request["account_level"],
)
download_sources.append(account_source)
elif VALUE_MAPPINGS[download_type]["source_type"] == "disaster":
# Disaster Page downloads
disaster_source = DownloadSource(
VALUE_MAPPINGS[download_type]["source_type"],
VALUE_MAPPINGS[download_type]["table_name"],
download_type,
agency_id,
)
disaster_source.award_category = json_request["award_category"]
disaster_source.queryset = filter_function(
json_request["filters"], download_type, VALUE_MAPPINGS[download_type]["base_fields"]
)
download_sources.append(disaster_source)
verify_requested_columns_available(tuple(download_sources), json_request.get("columns", []))
return download_sources
def build_data_file_name(source, download_job, piid, assistance_id):
d_map = {"d1": "Contracts", "d2": "Assistance", "treasury_account": "TAS", "federal_account": "FA"}
if download_job and download_job.monthly_download:
# For monthly archives, use the existing detailed zip filename for the data files
# e.g. FY(All)-012_Contracts_Delta_20191108.zip -> FY(All)-012_Contracts_Delta_20191108_%.csv
return strip_file_extension(download_job.file_name)
file_name_pattern = VALUE_MAPPINGS[source.source_type]["download_name"]
timestamp = datetime.strftime(datetime.now(timezone.utc), "%Y-%m-%d_H%HM%MS%S")
if source.is_for_idv or source.is_for_contract:
data_file_name = file_name_pattern.format(piid=slugify_text_for_file_names(piid, "UNKNOWN", 50))
elif source.is_for_assistance:
data_file_name = file_name_pattern.format(
assistance_id=slugify_text_for_file_names(assistance_id, "UNKNOWN", 50)
)
elif source.source_type == "disaster_recipient":
data_file_name = file_name_pattern.format(award_category=source.award_category, timestamp=timestamp)
else:
if source.agency_code == "all":
agency = "All"
else:
agency = str(source.agency_code)
request = json.loads(download_job.json_request)
filters = request["filters"]
if request.get("limit"):
agency = ""
elif source.file_type not in ("treasury_account", "federal_account"):
agency = f"{agency}_"
data_file_name = file_name_pattern.format(
agency=agency,
data_quarters=construct_data_date_range(filters),
level=d_map[source.file_type],
timestamp=timestamp,
type=d_map[source.file_type],
)
return data_file_name
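# Illustrative note (added commentary, not part of the original module): with the timestamp
# format "%Y-%m-%d_H%HM%MS%S" used above, a download generated on 2019-11-08 at 14:30:05 UTC
# yields the timestamp "2019-11-08_H14M30S05", which is substituted into the VALUE_MAPPINGS
# "download_name" pattern along with the agency, level, and type values.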
def parse_source(source, columns, download_job, working_dir, piid, assistance_id, zip_file_path, limit, file_format):
"""Write to delimited text file(s) and zip file(s) using the source data"""
data_file_name = build_data_file_name(source, download_job, piid, assistance_id)
source_query = source.row_emitter(columns)
extension = FILE_FORMATS[file_format]["extension"]
source.file_name = f"{data_file_name}.{extension}"
source_path = os.path.join(working_dir, source.file_name)
write_to_log(message=f"Preparing to download data as {source.file_name}", download_job=download_job)
# Generate the query file; values, limits, dates fixed
export_query = generate_export_query(source_query, limit, source, columns, file_format)
temp_file, temp_file_path = generate_export_query_temp_file(export_query, download_job)
start_time = time.perf_counter()
try:
# Create a separate process to run the PSQL command; wait
psql_process = multiprocessing.Process(target=execute_psql, args=(temp_file_path, source_path, download_job))
psql_process.start()
wait_for_process(psql_process, start_time, download_job)
delim = FILE_FORMATS[file_format]["delimiter"]
# Log how many rows we have
write_to_log(message="Counting rows in delimited text file", download_job=download_job)
try:
download_job.number_of_rows += count_rows_in_delimited_file(
filename=source_path, has_header=True, delimiter=delim
)
except Exception:
write_to_log(
message="Unable to obtain delimited text file line count", is_error=True, download_job=download_job
)
download_job.save()
# Create a separate process to split the large data files into smaller file and write to zip; wait
zip_process = multiprocessing.Process(
target=split_and_zip_data_files,
args=(zip_file_path, source_path, data_file_name, file_format, download_job),
)
zip_process.start()
wait_for_process(zip_process, start_time, download_job)
download_job.save()
except Exception as e:
raise e
finally:
# Remove temporary files
os.close(temp_file)
os.remove(temp_file_path)
def split_and_zip_data_files(zip_file_path, source_path, data_file_name, file_format, download_job=None):
with SubprocessTrace(
name=f"job.{JOB_TYPE}.download.zip",
service="bulk-download",
span_type=SpanTypes.WORKER,
source_path=source_path,
zip_file_path=zip_file_path,
) as span:
try:
# Split data files into separate files
# e.g. `Assistance_prime_transactions_delta_%s.csv`
log_time = time.perf_counter()
delim = FILE_FORMATS[file_format]["delimiter"]
extension = FILE_FORMATS[file_format]["extension"]
output_template = f"{data_file_name}_%s.{extension}"
write_to_log(message="Beginning the delimited text file partition", download_job=download_job)
list_of_files = partition_large_delimited_file(
file_path=source_path, delimiter=delim, row_limit=EXCEL_ROW_LIMIT, output_name_template=output_template
)
span.set_tag("file_parts", len(list_of_files))
msg = f"Partitioning data into {len(list_of_files)} files took {time.perf_counter() - log_time:.4f}s"
write_to_log(message=msg, download_job=download_job)
# Zip the split files into one zipfile
write_to_log(message="Beginning zipping and compression", download_job=download_job)
log_time = time.perf_counter()
append_files_to_zip_file(list_of_files, zip_file_path)
write_to_log(
message=f"Writing to zipfile took {time.perf_counter() - log_time:.4f}s", download_job=download_job
)
except Exception as e:
message = "Exception while partitioning text file"
if download_job:
fail_download(download_job, e, message)
write_to_log(message=message, download_job=download_job, is_error=True)
logger.error(e)
raise e
def start_download(download_job):
# Update job attributes
download_job.job_status_id = JOB_STATUS_DICT["running"]
download_job.number_of_rows = 0
download_job.number_of_columns = 0
download_job.file_size = 0
download_job.save()
write_to_log(message=f"Starting to process DownloadJob {download_job.download_job_id}", download_job=download_job)
return download_job.file_name
def finish_download(download_job):
download_job.job_status_id = JOB_STATUS_DICT["finished"]
download_job.save()
write_to_log(message=f"Finished processing DownloadJob {download_job.download_job_id}", download_job=download_job)
return download_job.file_name
def wait_for_process(process, start_time, download_job):
"""Wait for the process to complete, throw errors for timeouts or Process exceptions"""
log_time = time.perf_counter()
# Let the thread run until it finishes (max MAX_VISIBILITY_TIMEOUT), with a buffer of DOWNLOAD_VISIBILITY_TIMEOUT
sleep_count = 0
while process.is_alive():
if (
download_job
and not download_job.monthly_download
and (time.perf_counter() - start_time) > MAX_VISIBILITY_TIMEOUT
):
break
if sleep_count < 10:
time.sleep(WAIT_FOR_PROCESS_SLEEP / 5)
else:
time.sleep(WAIT_FOR_PROCESS_SLEEP)
sleep_count += 1
over_time = (time.perf_counter() - start_time) >= MAX_VISIBILITY_TIMEOUT
if download_job and (not download_job.monthly_download and over_time) or process.exitcode != 0:
if process.is_alive():
# Process is running for longer than MAX_VISIBILITY_TIMEOUT, kill it
write_to_log(
message=f"Attempting to terminate process (pid {process.pid})", download_job=download_job, is_error=True
)
process.terminate()
e = TimeoutError(
f"DownloadJob {download_job.download_job_id} lasted longer than {MAX_VISIBILITY_TIMEOUT / 3600} hours"
)
else:
# An error occurred in the process
e = Exception("Command failed. Please see the logs for details.")
raise e
return time.perf_counter() - log_time
def generate_export_query(source_query, limit, source, columns, file_format):
if limit:
source_query = source_query[:limit]
query_annotated = apply_annotations_to_sql(generate_raw_quoted_query(source_query), source.columns(columns))
options = FILE_FORMATS[file_format]["options"]
return r"\COPY ({}) TO STDOUT {}".format(query_annotated, options)
def generate_export_query_temp_file(export_query, download_job, temp_dir=None):
write_to_log(message=f"Saving PSQL Query: {export_query}", download_job=download_job, is_debug=True)
dir_name = "/tmp"
if temp_dir:
dir_name = temp_dir
# Create a unique temporary file to hold the raw query, using \copy
(temp_sql_file, temp_sql_file_path) = tempfile.mkstemp(prefix="bd_sql_", dir=dir_name)
with open(temp_sql_file_path, "w") as file:
file.write(export_query)
return temp_sql_file, temp_sql_file_path
def apply_annotations_to_sql(raw_query, aliases):
"""
Django's ORM understandably doesn't allow an alias to share a name with another available field. However, if we
want the efficiency of psql's COPY method while keeping the column names, we need to allow exactly that. This
function outputs a modified raw SQL string which performs the aliasing, enabling those scenarios.
"""
cte_sql, select_statements = _select_columns(raw_query)
DIRECT_SELECT_QUERY_REGEX = r'^[^ ]*\."[^"]*"$' # Django is pretty consistent with how it prints out queries
# Create a list from the non-derived values between SELECT and FROM
selects_list = [str for str in select_statements if re.search(DIRECT_SELECT_QUERY_REGEX, str)]
# Create a list from the derived values between SELECT and FROM
aliased_list = [str for str in select_statements if not re.search(DIRECT_SELECT_QUERY_REGEX, str.strip())]
deriv_dict = {}
for str in aliased_list:
split_string = _top_level_split(str, " AS ")
alias = split_string[1].replace('"', "").replace(",", "").strip()
if alias not in aliases:
raise Exception(f'alias "{alias}" not found!')
deriv_dict[alias] = split_string[0]
# Match aliases with their values
values_list = [
f'{deriv_dict[alias] if alias in deriv_dict else selects_list.pop(0)} AS "{alias}"' for alias in aliases
]
sql = raw_query.replace(_top_level_split(raw_query, "FROM")[0], "SELECT " + ", ".join(values_list), 1)
if cte_sql:
sql = f"{cte_sql} {sql}"
# Now that we've converted the queryset to SQL, cleaned up aliasing for non-annotated fields, and sorted
# the SELECT columns, there's one final step. The Django ORM does not allow alias names to conflict with
# column/field names on the underlying model. For annotated fields, naming conflict exceptions occur at
# the time they are applied to the queryset which means they never get to this function. To work around
# this, we give them a temporary name that cannot conflict with a field name on the model by appending
# the suffix specified by NAMING_CONFLICT_DISCRIMINATOR. Now that we have the "final" SQL, we must remove
# that suffix.
return sql.replace(NAMING_CONFLICT_DISCRIMINATOR, "")
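# Illustrative before/after sketch (hypothetical column and table names, not taken from the original source).
# Given a raw query such as:
#   SELECT "awards"."piid", (SELECT SUM(x) FROM t) AS "total_obligation" FROM "awards"
# and aliases ["award_piid", "total_obligation"], this function would emit roughly:
#   SELECT "awards"."piid" AS "award_piid", (SELECT SUM(x) FROM t) AS "total_obligation" FROM "awards"
# i.e. direct columns are re-aliased in order, derived expressions keep their own expression text, and any
# NAMING_CONFLICT_DISCRIMINATOR suffix is stripped from the final SQL.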
def _select_columns(sql: str) -> Tuple[str, List[str]]:
in_quotes = False
in_cte = False
parens_depth = 0
last_processed_index = 0
cte_sql = None
retval = []
for index, char in enumerate(sql):
if char == '"':
in_quotes = not in_quotes
if in_quotes:
continue
if char == "(":
parens_depth = parens_depth + 1
if in_cte:
continue
if char == ")":
parens_depth = parens_depth - 1
if in_cte and parens_depth == 0:
in_cte = False
cte_sql = sql[: index + 1]
last_processed_index = index
if parens_depth == 0 and not in_cte:
# Set flag to ignore the CTE
if sql[index : index + 5] == "WITH ":
in_cte = True
# Ignore the SELECT statement
if sql[index : index + 6] == "SELECT":
last_processed_index = index + 6
# If there is a FROM at the bottom level, we have all the values we need and can return
if sql[index : index + 4] == "FROM":
retval.append(sql[last_processed_index:index].strip())
return cte_sql, retval
# If there is a comma on the bottom level, add another select value and start parsing a new one
if char == ",":
retval.append(sql[last_processed_index:index].strip())
last_processed_index = index + 1 # skips the comma by design
return cte_sql, retval # this will almost certainly error out later.
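# Illustrative sketch of what _select_columns returns (hypothetical SQL, not from the original source):
#   _select_columns('WITH "cte" AS (SELECT 1) SELECT "t"."a", "t"."b" FROM "t"')
# would yield roughly:
#   cte_sql -> 'WITH "cte" AS (SELECT 1)'
#   retval  -> ['"t"."a"', '"t"."b"']
# i.e. the leading CTE (if any) is captured separately and the top-level SELECT expressions are returned as a
# list, split on commas that sit outside quotes and parentheses.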
def _top_level_split(sql, splitter):
in_quotes = False
parens_depth = 0
for index, char in enumerate(sql):
if char == '"':
in_quotes = not in_quotes
if in_quotes:
continue
if char == "(":
parens_depth = parens_depth + 1
if char == ")":
parens_depth = parens_depth - 1
if parens_depth == 0:
if sql[index : index + len(splitter)] == splitter:
return [sql[:index], sql[index + len(splitter) :]]
raise Exception(f"SQL string ${sql} cannot be split on ${splitter}")
def execute_psql(temp_sql_file_path, source_path, download_job):
"""Executes a single PSQL command within its own Subprocess"""
download_sql = Path(temp_sql_file_path).read_text()
if download_sql.startswith("\\COPY"):
# Trace library parses the SQL, but cannot understand the psql-specific \COPY command. Use standard COPY here.
download_sql = download_sql[1:]
# Stack 3 context managers: (1) psql code, (2) Download replica query, (3) (same) Postgres query
with SubprocessTrace(
name=f"job.{JOB_TYPE}.download.psql",
service="bulk-download",
resource=download_sql,
span_type=SpanTypes.SQL,
source_path=source_path,
), tracer.trace(
name="postgres.query", service="db_downloaddb", resource=download_sql, span_type=SpanTypes.SQL
), tracer.trace(
name="postgres.query", service="postgres", resource=download_sql, span_type=SpanTypes.SQL
):
try:
log_time = time.perf_counter()
temp_env = os.environ.copy()
if download_job and not download_job.monthly_download:
# Since terminating the process isn't guaranteed to end the DB statement, add timeout to client connection
temp_env["PGOPTIONS"] = f"--statement-timeout={settings.DOWNLOAD_DB_TIMEOUT_IN_HOURS}h"
cat_command = subprocess.Popen(["cat", temp_sql_file_path], stdout=subprocess.PIPE)
subprocess.check_output(
["psql", "-q", "-o", source_path, retrieve_db_string(), "-v", "ON_ERROR_STOP=1"],
stdin=cat_command.stdout,
stderr=subprocess.STDOUT,
env=temp_env,
)
duration = time.perf_counter() - log_time
write_to_log(
message=f"Wrote {os.path.basename(source_path)}, took {duration:.4f} seconds", download_job=download_job
)
except Exception as e:
if not settings.IS_LOCAL:
# Not logging the command as it can contain the database connection string
e.cmd = "[redacted psql command]"
logger.error(e)
sql = subprocess.check_output(["cat", temp_sql_file_path]).decode()
logger.error(f"Faulty SQL: {sql}")
raise e
def retrieve_db_string():
"""It is necessary for this to be a function so the test suite can mock the connection string"""
return settings.DOWNLOAD_DATABASE_URL
def strip_file_extension(file_name):
return os.path.splitext(os.path.basename(file_name))[0]
def fail_download(download_job, exception, message):
stack_trace = "".join(
traceback.format_exception(etype=type(exception), value=exception, tb=exception.__traceback__)
)
download_job.error_message = f"{message}:\n{stack_trace}"
download_job.job_status_id = JOB_STATUS_DICT["failed"]
download_job.save()
def add_data_dictionary_to_zip(working_dir, zip_file_path):
write_to_log(message="Adding data dictionary to zip file")
data_dictionary_file_name = "Data_Dictionary_Crosswalk.xlsx"
data_dictionary_file_path = os.path.join(working_dir, data_dictionary_file_name)
data_dictionary_url = settings.DATA_DICTIONARY_DOWNLOAD_URL
RetrieveFileFromUri(data_dictionary_url).copy(data_dictionary_file_path)
append_files_to_zip_file([data_dictionary_file_path], zip_file_path)
def _kill_spawned_processes(download_job=None):
"""Cleanup (kill) any spawned child processes during this job run"""
job = ps.Process(os.getpid())
for spawn_of_job in job.children(recursive=True):
write_to_log(
message=f"Attempting to terminate child process with PID [{spawn_of_job.pid}] and name "
f"[{spawn_of_job.name}]",
download_job=download_job,
is_error=True,
)
try:
spawn_of_job.kill()
except ps.NoSuchProcess:
pass
def create_empty_data_file(
source: DownloadSource,
download_job: DownloadJob,
working_dir: str,
piid: str,
assistance_id: str,
zip_file_path: str,
file_format: str,
) -> None:
data_file_name = build_data_file_name(source, download_job, piid, assistance_id)
extension = FILE_FORMATS[file_format]["extension"]
source.file_name = f"{data_file_name}.{extension}"
source_path = os.path.join(working_dir, source.file_name)
write_to_log(
message=f"Skipping download of {source.file_name} due to no valid columns provided", download_job=download_job
)
Path(source_path).touch()
append_files_to_zip_file([source_path], zip_file_path)
|
py | b4069f9fa93b52d15b2b77484920decbbd7c5e69 | import os
import time
import re
import pytest
from src.benchmark_metrics import (
TENSORFLOW2_INFERENCE_GPU_THRESHOLD,
TENSORFLOW2_INFERENCE_CPU_THRESHOLD,
TENSORFLOW1_INFERENCE_GPU_THRESHOLD,
TENSORFLOW1_INFERENCE_CPU_THRESHOLD,
)
from test.test_utils import BENCHMARK_RESULTS_S3_BUCKET, is_tf1
from test.test_utils.ec2 import (
ec2_performance_upload_result_to_s3_and_validate,
post_process_inference,
)
@pytest.mark.model("inception, RCNN-Resnet101-kitti, resnet50_v2, mnist, SSDResnet50Coco")
@pytest.mark.parametrize("ec2_instance_type", ["p3.16xlarge"], indirect=True)
def test_performance_ec2_tensorflow_inference_gpu(tensorflow_inference, ec2_connection, region, gpu_only):
threshold = (
TENSORFLOW1_INFERENCE_GPU_THRESHOLD if is_tf1(tensorflow_inference) else TENSORFLOW2_INFERENCE_GPU_THRESHOLD
)
ec2_performance_tensorflow_inference(tensorflow_inference, "gpu", ec2_connection, region, threshold)
@pytest.mark.model("inception, RCNN-Resnet101-kitti, resnet50_v2, mnist, SSDResnet50Coco")
@pytest.mark.parametrize("ec2_instance_type", ["c5.18xlarge"], indirect=True)
def test_performance_ec2_tensorflow_inference_cpu(tensorflow_inference, ec2_connection, region, cpu_only):
threshold = (
TENSORFLOW1_INFERENCE_CPU_THRESHOLD if is_tf1(tensorflow_inference) else TENSORFLOW2_INFERENCE_CPU_THRESHOLD
)
ec2_performance_tensorflow_inference(tensorflow_inference, "cpu", ec2_connection, region, threshold)
def ec2_performance_tensorflow_inference(image_uri, processor, ec2_connection, region, threshold):
docker_cmd = "nvidia-docker" if processor == "gpu" else "docker"
container_test_local_dir = os.path.join("$HOME", "container_tests")
tf_version = "1" if is_tf1(image_uri) else "2"
tf_api_version = "1.15" if tf_version == "1" else "2.3.0"
# Make sure we are logged into ECR so we can pull the image
ec2_connection.run(f"$(aws ecr get-login --no-include-email --region {region})", hide=True)
ec2_connection.run(f"{docker_cmd} pull -q {image_uri} ")
# Run performance inference command, display benchmark results to console
ec2_connection.run(f"pip3 install -U pip")
ec2_connection.run(
f"pip3 install boto3 grpcio tensorflow-serving-api=={tf_api_version} --user --no-warn-script-location"
)
time_str = time.strftime("%Y-%m-%d-%H-%M-%S")
commit_info = os.getenv("CODEBUILD_RESOLVED_SOURCE_VERSION")
log_file = f"synthetic_{commit_info}_{time_str}.log"
ec2_connection.run(
f"python3 {container_test_local_dir}/bin/benchmark/tf{tf_version}_serving_perf.py "
f"--processor {processor} --docker_image_name {image_uri} --run_all_s3 --binary /usr/bin/tensorflow_model_server --get_perf --iterations 1000 "
f"2>&1 | tee {log_file}"
)
ec2_performance_upload_result_to_s3_and_validate(
ec2_connection, image_uri, log_file, "synthetic", threshold, post_process_inference, log_file,
)
|
py | b406a0679642125eb224bced65768bfbcd64ab08 | import json
escalation_config = {
"share_reward": False,
"shape_reward": False,
"shape_beta": 0.8,
"defect_coef": -0.9,
"symmetry_plan": None
}
for i in range(10):
escalation_config["defect_coef"] = - i / 10
json.dump(escalation_config, open(f"./env-configs/escalation-gw-rr/-0.{i}.json", "w")) |
py | b406a109fb33bf521ecf9f3b2a9579c5bbd6c478 | """
Stores the unique identifier of autodata.
Created by judy 2019/08/15
"""
import traceback
from datetime import datetime
import pytz
from commonbaby.sql import (SqliteColumn, SqliteConn,
SqliteTable, table_locker)
from .sqliteconfig import SqliteConfig
from .tbsqlitebase import TbSqliteBase
class TbUnEXPDBData(TbSqliteBase):
__tb_autodata: SqliteTable = SqliteTable(
'undata',
True,
SqliteColumn(
colname='Id',
coltype='INTEGER',
nullable=False,
is_primary_key=True,
is_auto_increament=True,
is_unique=True).set_index_new(),
SqliteColumn(colname='UniqueId', nullable=False).set_index_new(),
SqliteColumn(colname='DownloadTime', coltype='DATETIME', nullable=False),
)
databasename = 'expdbdata'
def __init__(self, dbcfg: SqliteConfig):
TbSqliteBase.__init__(self, TbUnEXPDBData.__tb_autodata._tbname, dbcfg, TbUnEXPDBData.databasename)
def _append_tables(self):
self._conn_mngr.append_table(TbUnEXPDBData.__tb_autodata)
@table_locker(__tb_autodata._tbname)
def insert_identify(self, unique_info):
"""
Store the unique identifier of a piece of data.
:param unique_info:
:return:
"""
sql = '''
INSERT INTO undata(
UniqueId,
DownloadTime
)VALUES (?, ?)
'''
time_str = datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')
pars = (unique_info, time_str)
res = False
conn: SqliteConn = None
try:
conn: SqliteConn = self.connect_write(5)
c = conn.cursor
result = c.execute(sql, pars)
if result is None or result.rowcount < 1: # or len(result) < 1:
res = False
else:
res = True
except:
self._logger.error(f"Insert auto unique data error,err:{traceback.format_exc()}")
finally:
if conn is not None:
conn.commit()
conn.close()
return res
@table_locker(__tb_autodata._tbname)
def identify_count(self, unique_info) -> bool:
"""
Check whether this data has already been downloaded (i.e. its unique identifier exists in the database).
:param unique_info:
:return:
"""
conn: SqliteConn = None
res: bool = False
sql = """select count(1) from undata where UniqueId=?"""
pars = (unique_info,)
try:
for conn in self.connect_all(5):
try:
conn: SqliteConn = conn
c = conn.cursor
result = c.execute(sql, pars)
for c in result:
# print(c)
if len(c) > 0 and c[0] > 0:
res = True
break
except Exception as ex:
conn._conn.rollback()
raise ex
finally:
if conn is not None:
conn.close()
if res:
break
except:
self._logger.error(f"Count auto unique data error,err:{traceback.format_exc()}")
finally:
if conn is not None:
conn.commit()
conn.close()
return res
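# Illustrative usage sketch (not part of the original module; the config construction is hypothetical and
# depends on how SqliteConfig is set up elsewhere in this project):
#
#   tb = TbUnEXPDBData(SqliteConfig())
#   if not tb.identify_count("some-unique-id"):   # skip items that were already downloaded
#       tb.insert_identify("some-unique-id")      # record the identifier with the current download time
#
# i.e. identify_count() is the de-duplication check and insert_identify() persists a newly seen identifier.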
|
py | b406a2bd139ab2f7deff572cf02ef40f6f33a647 | _base_ = [
'../_base_/datasets/ade20k_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='SDModuleMT',
cfg_s=dict(
type='EncoderDecoder',
pretrained='pretrained/mit_b0.pth',
backbone=dict(
type='mit_b0',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[32, 64, 160, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=256),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
cfg_t=[dict(
type='EncoderDecoder',
backbone=dict(
type='mit_b1',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=256),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
dict(
type='EncoderDecoder',
backbone=dict(
type='mit_b2',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=768),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
dict(
type='EncoderDecoder',
backbone=dict(
type='mit_b3',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=768),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
],
distillation = [
{'student_layer':'decode_head.linear_pred',
'teacher_layer':'decode_head.linear_pred',
'loss_name':'MTLoss',
'loss_config':{
'weight':2,
'tau':1,
'reshape_config':'logits',
'resize_config':{'mode':'bilinear','align_corners':False},
'transform_config':{'loss_type':'channel','group_size':10},
'latestart_config':0,
'earlystop_config':120000,
# 'rot_config':[0,100]
},
},
{'student_layer':'decode_head.linear_pred',
'teacher_layer':'decode_head.linear_pred',
'loss_name':'MTLoss',
'loss_config':{
'weight':2,
'tau':1,
'reshape_config':'logits',
'resize_config':{'mode':'bilinear','align_corners':False},
'transform_config':{'loss_type':'channel','group_size':10},
'latestart_config':100,
'earlystop_config':120000,
# 'rot_config':[1,100]
},
},
{'student_layer':'decode_head.linear_pred',
'teacher_layer':'decode_head.linear_pred',
'loss_name':'MTLoss',
'loss_config':{
'weight':2,
'tau':1,
'reshape_config':'logits',
'resize_config':{'mode':'bilinear','align_corners':False},
'transform_config':{'loss_type':'channel','group_size':10},
'latestart_config':200,
'earlystop_config':120000,
# 'rot_config':[2,100]
},
},
],
t_pretrain = ['./pretrained/segformer.b1.512x512.ade.160k.pth',\
'./pretrained/segformer.b2.512x512.ade.160k.pth',
'./pretrained/segformer.b3.512x512.ade.160k.pth'],
train_cfg=dict(),
test_cfg=dict(mode='whole'),
)
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9,0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
work_dir = '/apdcephfs/private_inchzhang/shared_info/10.24/MT_example'
data = dict(samples_per_gpu=2)
evaluation = dict(interval=16000, metric='mIoU')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# resume_from = ''
|
py | b406a2cc7eb18b9a9cb3e58d68c955aabff3078c | # -*- coding: utf-8 -*-
""" S3 Extensions for gluon.dal.Field, reusable fields
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2018 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import sys
from itertools import chain
from uuid import uuid4
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.html import *
#from gluon.validators import *
from gluon.storage import Storage
from gluon.languages import lazyT
from s3dal import Query, SQLCustomType
from s3datetime import S3DateTime
from s3navigation import S3ScriptItem
from s3utils import s3_auth_user_represent, s3_auth_user_represent_name, s3_unicode, s3_str, S3MarkupStripper
from s3validators import IS_ISO639_2_LANGUAGE_CODE, IS_ONE_OF, IS_UTC_DATE, IS_UTC_DATETIME
from s3widgets import S3CalendarWidget, S3DateWidget
# =============================================================================
class FieldS3(Field):
"""
S3 extensions of the gluon.sql.Field class
If Server Side Pagination is on, the proper CAST is needed to
match the lookup table id
"""
def __init__(self, fieldname,
type="string",
length=None,
default=None,
required=False,
requires="<default>",
ondelete="CASCADE",
notnull=False,
unique=False,
uploadfield=True,
widget=None,
label=None,
comment=None,
writable=True,
readable=True,
update=None,
authorize=None,
autodelete=False,
represent=None,
uploadfolder=None,
compute=None,
sortby=None):
self.sortby = sortby
Field.__init__(self,
fieldname,
type,
length,
default,
required,
requires,
ondelete,
notnull,
unique,
uploadfield,
widget,
label,
comment,
writable,
readable,
update,
authorize,
autodelete,
represent,
uploadfolder,
compute)
# -------------------------------------------------------------------------
def join_via(self, value):
if self.type.find("reference") == 0:
return Query(self, "=", value)
else:
return QueryS3(self, "join_via", value)
# =============================================================================
class QueryS3(Query):
"""
S3 extensions of the gluon.sql.Query class
If Server Side Pagination is on, the proper CAST is needed to match
the string-typed id to lookup table id
"""
def __init__(self, left, op=None, right=None):
if op != "join_via":
Query.__init__(self, left, op, right)
else:
self.sql = "CAST(TRIM(%s,"|") AS INTEGER)=%s" % (left, right)
# =============================================================================
def s3_fieldmethod(name, f, represent=None, search_field=None):
"""
Helper to attach a representation method to a Field.Method.
@param name: the field name
@param f: the field method
@param represent: the representation function
@param search_field: the field to use for searches
- only used by datatable_filter currently
- can only be a single field in the same table currently
"""
if represent is None and search_field is None:
fieldmethod = Field.Method(name, f)
else:
class Handler(object):
def __init__(self, method, row):
self.method=method
self.row=row
def __call__(self, *args, **kwargs):
return self.method(self.row, *args, **kwargs)
if represent is not None:
if hasattr(represent, "bulk"):
Handler.represent = represent
else:
Handler.represent = staticmethod(represent)
if search_field is not None:
Handler.search_field = search_field
fieldmethod = Field.Method(name, f, handler=Handler)
return fieldmethod
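# Illustrative sketch (hypothetical field method and row fields, not from the original source): attach a
# virtual field together with its own representation so that it can be rendered like a regular field, e.g.
#
#   def fullname(row):
#       return "%s %s" % (row.pr_person.first_name, row.pr_person.last_name)
#
#   table.fullname = s3_fieldmethod("fullname", fullname,
#                                   represent = lambda v: v.upper(),
#                                   )
#
# The represent callable (or an S3Represent instance providing bulk()) is picked up via the Handler class above.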
# =============================================================================
class S3ReusableField(object):
"""
DRY Helper for reusable fields:
This creates neither a Table nor a Field, but just
an argument store. The field is created with the __call__
method, which is faster than copying an existing field.
"""
def __init__(self, name, type="string", **attr):
self.name = name
self.__type = type
self.attr = Storage(attr)
# -------------------------------------------------------------------------
def __call__(self, name=None, **attr):
if not name:
name = self.name
ia = Storage(self.attr)
DEFAULT = "default"
widgets = ia.pop("widgets", {})
if attr:
empty = attr.pop("empty", True)
if not empty:
requires = ia.requires
if requires:
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
requires = r.other
ia.update(requires=requires)
widget = attr.pop("widget", DEFAULT)
ia.update(**attr)
else:
widget = DEFAULT
if isinstance(widget, basestring):
if widget == DEFAULT and "widget" in ia:
widget = ia.widget
else:
if not isinstance(widgets, dict):
widgets = {DEFAULT: widgets}
if widget != DEFAULT and widget not in widgets:
raise NameError("Undefined widget: %s" % widget)
else:
widget = widgets.get(widget)
ia.widget = widget
if "script" in ia:
if ia.script:
if ia.comment:
ia.comment = TAG[""](ia.comment,
S3ScriptItem(script=ia.script))
else:
ia.comment = S3ScriptItem(script=ia.script)
del ia["script"]
if ia.sortby is not None:
return FieldS3(name, self.__type, **ia)
else:
return Field(name, self.__type, **ia)
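# Illustrative sketch (hypothetical model code, not from the original source): a reusable field stores its
# defaults once and builds a fresh Field on each call, optionally overriding attributes per table:
#
#   person_id = S3ReusableField("person_id", "reference pr_person",
#                               label = "Person",
#                               ondelete = "RESTRICT",
#                               )
#   db.define_table("my_table", person_id(), person_id("reviewer_id", label="Reviewer"))
#
# Passing empty=False unwraps an IS_EMPTY_OR validator, and widget="..." selects one of the preconfigured
# widgets, as handled in __call__ above.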
# =============================================================================
class S3Represent(object):
"""
Scalable universal field representation for option fields and
foreign keys. Can be subclassed and tailored to the particular
model where necessary.
@group Configuration (in the model): __init__
@group API (to apply the method): __call__,
multiple,
bulk,
render_list
@group Prototypes (to adapt in subclasses): lookup_rows,
represent_row,
link
@group Internal Methods: _setup,
_lookup
"""
def __init__(self,
lookup=None,
key=None,
fields=None,
labels=None,
options=None,
translate=False,
linkto=None,
show_link=False,
multiple=False,
hierarchy=False,
default=None,
none=None,
field_sep=" "
):
"""
Constructor
@param lookup: the name of the lookup table
@param key: the field name of the primary key of the lookup table,
a field name
@param fields: the fields to extract from the lookup table, a list
of field names
@param labels: string template or callable to represent rows from
the lookup table, callables must return a string
@param options: dictionary of options to lookup the representation
of a value, overrides lookup and key
@param multiple: web2py list-type (all values will be lists)
@param hierarchy: render a hierarchical representation, either
True or a string template like "%s > %s"
@param translate: translate all representations (using T)
@param linkto: a URL (as string) to link representations to,
with "[id]" as placeholder for the key
@param show_link: whether to add a URL to representations
@param default: default representation for unknown options
@param none: representation for empty fields (None or empty list)
@param field_sep: separator to use to join fields
"""
self.tablename = lookup
self.table = None
self.key = key
self.fields = fields
self.labels = labels
self.options = options
self.list_type = multiple
self.hierarchy = hierarchy
self.translate = translate
self.linkto = linkto
self.show_link = show_link
self.default = default
self.none = none
self.field_sep = field_sep
self.setup = False
self.theset = None
self.queries = 0
self.lazy = []
self.lazy_show_link = False
self.rows = {}
# Attributes to simulate being a function for sqlhtml's represent()
# Make sure we indicate only 1 positional argument
self.func_code = Storage(co_argcount = 1)
self.func_defaults = None
if hasattr(self, "lookup_rows"):
self.custom_lookup = True
else:
self.lookup_rows = self._lookup_rows
self.custom_lookup = False
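# Illustrative configuration sketch (hypothetical lookup table and fields, not from the original source):
#
#   represent = S3Represent(lookup = "org_organisation",
#                           fields = ["name", "acronym"],
#                           field_sep = " - ",
#                           show_link = True,
#                           )
#
# A foreign key field can then use represent(value) for a single value, represent.multiple(values) for
# list:reference fields, or represent.bulk(values) to pre-populate the cache for a whole data table.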
# -------------------------------------------------------------------------
def _lookup_rows(self, key, values, fields=None):
"""
Lookup all rows referenced by values.
(in foreign key representations)
@param key: the key Field
@param values: the values
@param fields: the fields to retrieve
"""
fields = [] if fields is None else list(fields)
fields.append(key)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(*fields)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row, prefix=None):
"""
Represent the referenced row.
(in foreign key representations)
@param row: the row
@return: the representation of the Row, or None if there
is an error in the Row
"""
labels = self.labels
translated = False
if self.slabels:
# String Template or lazyT
try:
row_dict = row.as_dict()
except AttributeError:
# Row just a dict/Storage after all? (e.g. custom lookup)
row_dict = row
# Represent None as self.none
none = self.none
for k, v in row_dict.items():
if v is None:
row_dict[k] = self.none
v = labels % row_dict
elif self.clabels:
# External Renderer
v = labels(row)
else:
# Default
values = [row[f] for f in self.fields if row[f] not in (None, "")]
if len(values) > 1:
# Multiple values => concatenate with separator
if self.translate:
# Translate items individually before concatenating
T = current.T
values = [T(v) if not type(v) is lazyT else v for v in values]
translated = True
sep = self.field_sep
v = sep.join([s3_str(v) for v in values])
elif values:
v = s3_str(values[0])
else:
v = self.none
if not translated and self.translate and not type(v) is lazyT:
output = current.T(v)
else:
output = v
if prefix and self.hierarchy:
return self.htemplate % (prefix, output)
return output
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
- Typically, k is a foreign key value, and v the
representation of the referenced record, and the link
shall open a read view of the referenced record.
- In the base class, the linkto-parameter expects a URL (as
string) with "[id]" as placeholder for the key.
@param k: the key
@param v: the representation of the key
@param row: the row with this key (unused in the base class)
"""
if self.linkto:
k = s3_str(k)
return A(v, _href=self.linkto.replace("[id]", k) \
.replace("%5Bid%5D", k))
else:
return v
# -------------------------------------------------------------------------
def __call__(self, value, row=None, show_link=True):
"""
Represent a single value (standard entry point).
@param value: the value
@param row: the referenced row (if value is a foreign key)
@param show_link: render the representation as link
"""
self._setup()
show_link = show_link and self.show_link
if self.list_type:
# Is a list-type => use multiple
return self.multiple(value,
rows=row,
list_type=False,
show_link=show_link)
# Prefer the row over the value
if row and self.table:
value = row[self.key]
# Lookup the representation
if value:
rows = [row] if row is not None else None
items = self._lookup([value], rows=rows)
if value in items:
k, v = value, items[value]
r = self.link(k, v, row=self.rows.get(k)) \
if show_link else items[value]
else:
r = self.default
return r
return self.none
# -------------------------------------------------------------------------
def multiple(self, values, rows=None, list_type=True, show_link=True):
"""
Represent multiple values as a comma-separated list.
@param values: list of values
@param rows: the referenced rows (if values are foreign keys)
@param show_link: render each representation as link
"""
self._setup()
show_link = show_link and self.show_link
# Get the values
if rows and self.table:
key = self.key
values = [row[key] for row in rows]
elif self.list_type and list_type:
try:
hasnone = None in values
if hasnone:
values = [i for i in values if i != None]
values = list(set(chain.from_iterable(values)))
if hasnone:
values.append(None)
except TypeError:
raise ValueError("List of lists expected, got %s" % values)
else:
values = [values] if type(values) is not list else values
# Lookup the representations
if values:
default = self.default
items = self._lookup(values, rows=rows)
if show_link:
link = self.link
rows = self.rows
labels = [[link(k, s3_str(items[k]), row=rows.get(k)), ", "]
if k in items else [default, ", "]
for k in values]
if labels:
return TAG[""](list(chain.from_iterable(labels))[:-1])
else:
return ""
else:
labels = [s3_str(items[k])
if k in items else default for k in values]
if labels:
return ", ".join(labels)
return self.none
# -------------------------------------------------------------------------
def bulk(self, values, rows=None, list_type=True, show_link=True):
"""
Represent multiple values as dict {value: representation}
@param values: list of values
@param rows: the rows
@param show_link: render each representation as link
@return: a dict {value: representation}
@note: for list-types, the dict keys will be the individual
values within all lists - and not the lists (simply
because lists can not be dict keys). Thus, the caller
would still have to construct the final string/HTML.
"""
self._setup()
show_link = show_link and self.show_link
# Get the values
if rows and self.table:
key = self.key
_rows = self.rows
values = set()
add_value = values.add
for row in rows:
value = row[key]
_rows[value] = row
add_value(value)
values = list(values)
elif self.list_type and list_type:
try:
hasnone = None in values
if hasnone:
values = [i for i in values if i != None]
values = list(set(chain.from_iterable(values)))
if hasnone:
values.append(None)
except TypeError:
raise ValueError("List of lists expected, got %s" % values)
else:
values = [values] if type(values) is not list else values
# Lookup the representations
if values:
labels = self._lookup(values, rows=rows)
if show_link:
link = self.link
rows = self.rows
labels = dict((k, link(k, v, rows.get(k)))
for k, v in labels.items())
for k in values:
if k not in labels:
labels[k] = self.default
else:
labels = {}
labels[None] = self.none
return labels
# -------------------------------------------------------------------------
def render_list(self, value, labels, show_link=True):
"""
Helper method to render list-type representations from
bulk()-results.
@param value: the list
@param labels: the labels as returned from bulk()
@param show_link: render references as links, should
be the same as used with bulk()
"""
show_link = show_link and self.show_link
if show_link:
labels = [(labels[v], ", ")
if v in labels else (self.default, ", ")
for v in value]
if labels:
return TAG[""](list(chain.from_iterable(labels))[:-1])
else:
return ""
else:
return ", ".join([s3_str(labels[v])
if v in labels else self.default
for v in value])
# -------------------------------------------------------------------------
def _setup(self):
""" Lazy initialization of defaults """
if self.setup:
return
self.queries = 0
# Default representations
messages = current.messages
if self.default is None:
self.default = s3_str(messages.UNKNOWN_OPT)
if self.none is None:
self.none = messages["NONE"]
# Initialize theset
if self.options is not None:
if self.translate:
T = current.T
self.theset = dict((opt, T(label))
if isinstance(label, basestring) else (opt, label)
for opt, label in self.options.items()
)
else:
self.theset = self.options
else:
self.theset = {}
# Lookup table parameters and linkto
if self.table is None:
tablename = self.tablename
if tablename:
table = current.s3db.table(tablename)
if table is not None:
if self.key is None:
self.key = table._id.name
if not self.fields:
if "name" in table:
self.fields = ["name"]
else:
self.fields = [self.key]
self.table = table
if self.linkto is None and self.show_link:
c, f = tablename.split("_", 1)
self.linkto = URL(c=c, f=f, args=["[id]"], extension="")
# What type of renderer do we use?
labels = self.labels
# String template?
self.slabels = isinstance(labels, (basestring, lazyT))
# External renderer?
self.clabels = callable(labels)
# Hierarchy template
if isinstance(self.hierarchy, basestring):
self.htemplate = self.hierarchy
else:
self.htemplate = "%s > %s"
self.setup = True
# -------------------------------------------------------------------------
def _lookup(self, values, rows=None):
"""
Lazy lookup values.
@param values: list of values to lookup
@param rows: rows referenced by values (if values are foreign keys)
optional
"""
theset = self.theset
keys = {}
items = {}
lookup = {}
# Check whether values are already in theset
table = self.table
for _v in values:
v = _v
if v is not None and table and isinstance(v, basestring):
try:
v = int(_v)
except ValueError:
pass
keys[v] = _v
if v is None:
items[_v] = self.none
elif v in theset:
items[_v] = theset[v]
else:
lookup[v] = True
if table is None or not lookup:
return items
if table and self.hierarchy:
# Does the lookup table have a hierarchy?
from s3hierarchy import S3Hierarchy
h = S3Hierarchy(table._tablename)
if h.config:
def lookup_parent(node_id):
parent = h.parent(node_id)
if parent and \
parent not in theset and \
parent not in lookup:
lookup[parent] = False
lookup_parent(parent)
return
for node_id in lookup.keys():
lookup_parent(node_id)
else:
h = None
else:
h = None
# Get the primary key
pkey = self.key
ogetattr = object.__getattribute__
try:
key = ogetattr(table, pkey)
except AttributeError:
return items
# Use the given rows to lookup the values
pop = lookup.pop
represent_row = self.represent_row
represent_path = self._represent_path
if rows and not self.custom_lookup:
rows_ = dict((row[key], row) for row in rows)
self.rows.update(rows_)
for row in rows:
k = row[key]
if k not in theset:
if h:
theset[k] = represent_path(k,
row,
rows = rows_,
hierarchy = h,
)
else:
theset[k] = represent_row(row)
if pop(k, None):
items[keys.get(k, k)] = theset[k]
# Retrieve additional rows as needed
if lookup:
if not self.custom_lookup:
try:
# Need for speed: assume all fields are in table
fields = [ogetattr(table, f) for f in self.fields]
except AttributeError:
# Ok - they are not: provide debug output and filter fields
current.log.error(sys.exc_info()[1])
fields = [ogetattr(table, f)
for f in self.fields if hasattr(table, f)]
else:
fields = []
rows = self.lookup_rows(key, lookup.keys(), fields=fields)
rows = dict((row[key], row) for row in rows)
self.rows.update(rows)
if h:
for k, row in rows.items():
if lookup.pop(k, None):
items[keys.get(k, k)] = represent_path(k,
row,
rows = rows,
hierarchy = h,
)
else:
for k, row in rows.items():
lookup.pop(k, None)
items[keys.get(k, k)] = theset[k] = represent_row(row)
# Anything left gets set to default
if lookup:
for k in lookup:
items[keys.get(k, k)] = self.default
return items
# -------------------------------------------------------------------------
def _represent_path(self, value, row, rows=None, hierarchy=None):
"""
Recursive helper method to represent value as path in
a hierarchy.
@param value: the value
@param row: the row containing the value
@param rows: all rows from _lookup as dict
@param hierarchy: the S3Hierarchy instance
"""
theset = self.theset
if value in theset:
return theset[value]
prefix = None
parent = hierarchy.parent(value)
if parent:
if parent in theset:
prefix = theset[parent]
elif parent in rows:
prefix = self._represent_path(parent,
rows[parent],
rows=rows,
hierarchy=hierarchy)
result = self.represent_row(row, prefix=prefix)
theset[value] = result
return result
# =============================================================================
class S3RepresentLazy(object):
"""
Lazy Representation of a field value, utilizes the bulk-feature
of S3Represent-style representation methods
"""
def __init__(self, value, renderer):
"""
Constructor
@param value: the value
@param renderer: the renderer (S3Represent instance)
"""
self.value = value
self.renderer = renderer
self.multiple = False
renderer.lazy.append(value)
# -------------------------------------------------------------------------
def __repr__(self):
return s3_str(self.represent())
# -------------------------------------------------------------------------
def represent(self):
""" Represent as string """
value = self.value
renderer = self.renderer
if renderer.lazy:
labels = renderer.bulk(renderer.lazy, show_link=False)
renderer.lazy = []
else:
labels = renderer.theset
if renderer.list_type:
if self.multiple:
return renderer.multiple(value, show_link=False)
else:
return renderer.render_list(value, labels, show_link=False)
else:
if self.multiple:
return renderer.multiple(value, show_link=False)
else:
return renderer(value, show_link=False)
# -------------------------------------------------------------------------
def render(self):
""" Render as HTML """
value = self.value
renderer = self.renderer
if renderer.lazy:
labels = renderer.bulk(renderer.lazy)
renderer.lazy = []
else:
labels = renderer.theset
if renderer.list_type:
if not value:
value = []
if self.multiple:
if len(value) and type(value[0]) is not list:
value = [value]
return renderer.multiple(value)
else:
return renderer.render_list(value, labels)
else:
if self.multiple:
return renderer.multiple(value)
else:
return renderer(value)
# -------------------------------------------------------------------------
def render_node(self, element, attributes, name):
"""
Render as text or attribute of an XML element
@param element: the element
@param attributes: the attributes dict of the element
@param name: the attribute name
"""
# Render value
text = s3_unicode(self.represent())
# Strip markup + XML-escape
if text and "<" in text:
try:
stripper = S3MarkupStripper()
stripper.feed(text)
text = stripper.stripped()
except:
pass
# Add to node
if text is not None:
if element is not None:
element.text = text
else:
attributes[name] = text
return
# =============================================================================
# Record identity meta-fields
# Use URNs according to http://tools.ietf.org/html/rfc4122
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = lambda x: "%s" % (uuid4().urn
if x == ""
else str(x.encode("utf-8"))),
decoder = lambda x: x)
#if db and current.db._adapter.represent("X", s3uuid) != "'X'":
# # Old web2py DAL, must add quotes in encoder
# s3uuid = SQLCustomType(type = "string",
# native = "VARCHAR(128)",
# encoder = (lambda x: "'%s'" % (uuid4().urn
# if x == ""
# else str(x.encode("utf-8")).replace("'", "''"))),
# decoder = (lambda x: x))
# Universally unique identifier for a record
s3_meta_uuid = S3ReusableField("uuid", type=s3uuid,
length = 128,
notnull = True,
unique = True,
readable = False,
writable = False,
default = "")
# Master-Copy-Index (for Sync)
s3_meta_mci = S3ReusableField("mci", "integer",
default = 0,
readable = False,
writable = False)
def s3_uid():
return (s3_meta_uuid(),
s3_meta_mci())
# =============================================================================
# Record "soft"-deletion meta-fields
# "Deleted"-flag
s3_meta_deletion_status = S3ReusableField("deleted", "boolean",
default = False,
readable = False,
writable = False)
# Parked foreign keys of a deleted record in JSON format
# => to be restored upon "un"-delete
s3_meta_deletion_fk = S3ReusableField("deleted_fk", #"text",
readable = False,
writable = False)
# ID of the record replacing this record
# => for record merger (de-duplication)
s3_meta_deletion_rb = S3ReusableField("deleted_rb", "integer",
readable = False,
writable = False)
def s3_deletion_status():
return (s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_deletion_rb())
# =============================================================================
# Record timestamp meta-fields
s3_meta_created_on = S3ReusableField("created_on", "datetime",
readable = False,
writable = False,
default = lambda: \
datetime.datetime.utcnow())
s3_meta_modified_on = S3ReusableField("modified_on", "datetime",
readable = False,
writable = False,
default = lambda: \
datetime.datetime.utcnow(),
update = lambda: \
datetime.datetime.utcnow())
def s3_timestamp():
return (s3_meta_created_on(),
s3_meta_modified_on())
# =============================================================================
# Record authorship meta-fields
def s3_authorstamp():
"""
Record ownership meta-fields
"""
auth = current.auth
utable = auth.settings.table_user
if auth.is_logged_in():
# Not current.auth.user to support impersonation
current_user = current.session.auth.user.id
else:
current_user = None
if current.deployment_settings.get_ui_auth_user_represent() == "name":
represent = s3_auth_user_represent_name
else:
represent = s3_auth_user_represent
# Author of a record
s3_meta_created_by = S3ReusableField("created_by", utable,
readable = False,
writable = False,
requires = None,
default = current_user,
represent = represent,
ondelete = "RESTRICT")
# Last author of a record
s3_meta_modified_by = S3ReusableField("modified_by", utable,
readable = False,
writable = False,
requires = None,
default = current_user,
update = current_user,
represent = represent,
ondelete = "RESTRICT")
return (s3_meta_created_by(),
s3_meta_modified_by())
# =============================================================================
def s3_ownerstamp():
"""
Record ownership meta-fields
"""
auth = current.auth
utable = auth.settings.table_user
# Individual user who owns the record
s3_meta_owned_by_user = S3ReusableField("owned_by_user", utable,
readable = False,
writable = False,
requires = None,
# Not current.auth.user to support impersonation
default = current.session.auth.user.id
if auth.is_logged_in()
else None,
represent = lambda id: \
id and s3_auth_user_represent(id) or \
current.messages.UNKNOWN_OPT,
ondelete="RESTRICT")
# Role of users who collectively own the record
s3_meta_owned_by_group = S3ReusableField("owned_by_group", "integer",
readable = False,
writable = False,
requires = None,
default = None,
represent = S3Represent(lookup="auth_group",
fields=["role"])
)
# Person Entity controlling access to this record
s3_meta_realm_entity = S3ReusableField("realm_entity", "integer",
default = None,
readable = False,
writable = False,
requires = None,
# use a lambda here as we don't
# want the model to be loaded yet
represent = lambda val: \
current.s3db.pr_pentity_represent(val))
return (s3_meta_owned_by_user(),
s3_meta_owned_by_group(),
s3_meta_realm_entity())
# =============================================================================
def s3_meta_fields():
"""
Normal meta-fields added to every table
"""
# Approver of a record
s3_meta_approved_by = S3ReusableField("approved_by", "integer",
readable = False,
writable = False,
requires = None,
represent = s3_auth_user_represent)
fields = (s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_deletion_rb(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_approved_by(),
)
fields = (fields + s3_authorstamp() + s3_ownerstamp())
return fields
def s3_all_meta_field_names():
return [field.name for field in s3_meta_fields()]
# =============================================================================
# Reusable roles fields
def s3_role_required():
"""
Role Required to access a resource
- used by GIS for map layer permissions management
"""
T = current.T
gtable = current.auth.settings.table_group
represent = S3Represent(lookup="auth_group", fields=["role"])
f = S3ReusableField("role_required", gtable,
sortby="role",
requires = IS_EMPTY_OR(
IS_ONE_OF(current.db, "auth_group.id",
represent,
zero=T("Public"))),
#widget = S3AutocompleteWidget("admin",
# "group",
# fieldname="role"),
represent = represent,
label = T("Role Required"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Role Required"),
T("If this record should be restricted then select which role is required to access the record here."))),
ondelete = "RESTRICT")
return f()
# -----------------------------------------------------------------------------
def s3_roles_permitted(name="roles_permitted", **attr):
"""
List of Roles Permitted to access a resource
- used by CMS
"""
T = current.T
represent = S3Represent(lookup="auth_group", fields=["role"])
if "label" not in attr:
attr["label"] = T("Roles Permitted")
if "sortby" not in attr:
attr["sortby"] = "role"
if "represent" not in attr:
attr["represent"] = represent
if "requires" not in attr:
attr["requires"] = IS_EMPTY_OR(IS_ONE_OF(current.db,
"auth_group.id",
represent,
multiple=True))
if "comment" not in attr:
attr["comment"] = DIV(_class="tooltip",
_title="%s|%s" % (T("Roles Permitted"),
T("If this record should be restricted then select which role(s) are permitted to access the record here.")))
if "ondelete" not in attr:
attr["ondelete"] = "RESTRICT"
f = S3ReusableField(name, "list:reference auth_group",
**attr)
return f()
# =============================================================================
def s3_comments(name="comments", **attr):
"""
Return a standard Comments field
"""
from s3widgets import s3_comments_widget
T = current.T
if "label" not in attr:
attr["label"] = T("Comments")
if "represent" not in attr:
# Support HTML markup
attr["represent"] = lambda comments: \
XML(comments) if comments else current.messages["NONE"]
if "widget" not in attr:
attr["widget"] = s3_comments_widget
if "comment" not in attr:
attr["comment"] = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Comments"),
T("Please use this field to record any additional information, including a history of the record if it is updated.")))
f = S3ReusableField(name, "text",
**attr)
return f()
# =============================================================================
def s3_currency(name="currency", **attr):
"""
Return a standard Currency field
@ToDo: Move to a Finance module?
"""
settings = current.deployment_settings
if "label" not in attr:
attr["label"] = current.T("Currency")
if "default" not in attr:
attr["default"] = settings.get_fin_currency_default()
if "requires" not in attr:
currency_opts = settings.get_fin_currencies()
attr["requires"] = IS_IN_SET(currency_opts.keys(),
zero=None)
if "writable" not in attr:
attr["writable"] = settings.get_fin_currency_writable()
f = S3ReusableField(name, length=3,
**attr)
return f()
# =============================================================================
def s3_language(name="language", **attr):
"""
Return a standard Language field
"""
if "label" not in attr:
attr["label"] = current.T("Language")
if "default" not in attr:
attr["default"] = current.deployment_settings.get_L10n_default_language()
empty = attr.pop("empty", None)
if empty:
zero = ""
else:
zero = None
list_from_settings = attr.pop("list_from_settings", True)
select = attr.pop("select", None) # None = Full list
translate = attr.pop("translate", True)
if select or not list_from_settings:
requires = IS_ISO639_2_LANGUAGE_CODE(select = select,
sort = True,
translate = translate,
zero = zero,
)
else:
# Use deployment_settings to show a limited list
requires = IS_ISO639_2_LANGUAGE_CODE(sort = True,
translate = translate,
zero = zero,
)
if "requires" not in attr:
if empty is False:
attr["requires"] = requires
else:
# Default
attr["requires"] = IS_EMPTY_OR(requires)
if "represent" not in attr:
attr["represent"] = requires.represent
f = S3ReusableField(name, length=8,
**attr)
return f()
# =============================================================================
def s3_date(name="date", **attr):
"""
Return a standard date-field
@param name: the field name
@keyword default: the field default, can be specified as "now" for
current date, or as Python date
@keyword past: number of selectable past months
@keyword future: number of selectable future months
@keyword widget: the form widget for the field, can be specified
as "date" for S3DateWidget, "calendar" for
S3CalendarWidget, or as a web2py FormWidget,
defaults to "calendar"
@keyword calendar: the calendar to use for this widget, defaults
to current.calendar
@keyword start_field: CSS selector for the start field for interval
selection
@keyword default_interval: the default interval
@keyword default_explicit: whether the user must click the field
to set the default, or whether it will
automatically be set when the value for
start_field is set
@keyword set_min: CSS selector for another date/time widget to
dynamically set the minimum selectable date/time to
the value selected in this widget
@keyword set_max: CSS selector for another date/time widget to
dynamically set the maximum selectable date/time to
the value selected in this widget
@note: other S3ReusableField keywords are also supported (in addition
to the above)
@note: calendar-option requires widget="calendar" (default), otherwise
Gregorian calendar is enforced for the field
@note: set_min/set_max only supported for widget="calendar" (default)
@note: interval options currently not supported by S3CalendarWidget,
only available with widget="date"
@note: start_field and default_interval should be given together
@note: sets a default field label "Date" => use label-keyword to
override if necessary
@note: sets a default validator IS_UTC_DATE => use requires-keyword
to override if necessary
@note: sets a default representation S3DateTime.date_represent => use
represent-keyword to override if necessary
@ToDo: Different default field name in case we need to start supporting
Oracle, where 'date' is a reserved word
"""
attributes = dict(attr)
# Calendar
calendar = attributes.pop("calendar", None)
# Past and future options
past = attributes.pop("past", None)
future = attributes.pop("future", None)
# Label
if "label" not in attributes:
attributes["label"] = current.T("Date")
# Widget-specific options (=not intended for S3ReusableField)
WIDGET_OPTIONS = ("start_field",
"default_interval",
"default_explicit",
"set_min",
"set_max",
)
# Widget
widget = attributes.get("widget", "calendar")
widget_options = {}
if widget == "date":
# Legacy: S3DateWidget
# @todo: deprecate (once S3CalendarWidget supports all legacy options)
# Must use Gregorian calendar
calendar = "Gregorian"
# Past/future options
if past is not None:
widget_options["past"] = past
if future is not None:
widget_options["future"] = future
# Supported additional widget options
SUPPORTED_OPTIONS = ("start_field",
"default_interval",
"default_explicit",
)
for option in WIDGET_OPTIONS:
if option in attributes:
if option in SUPPORTED_OPTIONS:
widget_options[option] = attributes[option]
del attributes[option]
widget = S3DateWidget(**widget_options)
elif widget == "calendar":
# Default: calendar widget
widget_options["calendar"] = calendar
# Past/future options
if past is not None:
widget_options["past_months"] = past
if future is not None:
widget_options["future_months"] = future
# Supported additional widget options
SUPPORTED_OPTIONS = ("set_min",
"set_max",
)
for option in WIDGET_OPTIONS:
if option in attributes:
if option in SUPPORTED_OPTIONS:
widget_options[option] = attributes[option]
del attributes[option]
widget = S3CalendarWidget(**widget_options)
else:
# Drop all widget options
for option in WIDGET_OPTIONS:
attributes.pop(option, None)
attributes["widget"] = widget
# Default value
now = current.request.utcnow.date()
if attributes.get("default") == "now":
attributes["default"] = now
# Representation
if "represent" not in attributes:
attributes["represent"] = lambda dt: \
S3DateTime.date_represent(dt,
utc=True,
calendar=calendar,
)
# Validator
if "requires" not in attributes:
if past is None and future is None:
requires = IS_UTC_DATE(calendar=calendar)
else:
from dateutil.relativedelta import relativedelta
minimum = maximum = None
if past is not None:
minimum = now - relativedelta(months = past)
if future is not None:
maximum = now + relativedelta(months = future)
requires = IS_UTC_DATE(calendar=calendar,
minimum=minimum,
maximum=maximum,
)
empty = attributes.pop("empty", None)
if empty is False:
attributes["requires"] = requires
else:
# Default
attributes["requires"] = IS_EMPTY_OR(requires)
f = S3ReusableField(name, "date", **attributes)
return f()
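# Illustrative usage sketch (hypothetical table definition, not from the original source):
#
#   s3db.define_table("project_milestone",
#                     s3_date(label = T("Due Date"),
#                             default = "now",
#                             future = 12,   # only allow dates up to 12 months ahead
#                             ),
#                     *s3_meta_fields())
#
# With widget="calendar" (the default) the past/future limits are passed to S3CalendarWidget as
# past_months/future_months and enforced again by the IS_UTC_DATE validator.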
# =============================================================================
def s3_datetime(name="date", **attr):
"""
Return a standard datetime field
@param name: the field name
@keyword default: the field default, can be specified as "now" for
current date/time, or as Python date
@keyword past: number of selectable past hours
@keyword future: number of selectable future hours
@keyword widget: form widget option, can be specified as "date"
for date-only, or "datetime" for date+time (default),
or as a web2py FormWidget
@keyword calendar: the calendar to use for this field, defaults
to current.calendar
@keyword set_min: CSS selector for another date/time widget to
dynamically set the minimum selectable date/time to
the value selected in this widget
@keyword set_max: CSS selector for another date/time widget to
dynamically set the maximum selectable date/time to
the value selected in this widget
@note: other S3ReusableField keywords are also supported (in addition
to the above)
@note: sets a default field label "Date" => use label-keyword to
override if necessary
@note: sets a default validator IS_UTC_DATE/IS_UTC_DATETIME => use
requires-keyword to override if necessary
@note: sets a default representation S3DateTime.date_represent or
S3DateTime.datetime_represent respectively => use the
represent-keyword to override if necessary
@ToDo: Different default field name in case we need to start supporting
Oracle, where 'date' is a reserved word
"""
attributes = dict(attr)
# Calendar
calendar = attributes.pop("calendar", None)
# Limits
limits = {}
for keyword in ("past", "future", "min", "max"):
if keyword in attributes:
limits[keyword] = attributes[keyword]
del attributes[keyword]
# Compute earliest/latest
widget = attributes.pop("widget", None)
now = current.request.utcnow
if widget == "date":
# Helper function to convert past/future hours into
# earliest/latest datetime, retaining day of month and
# time of day
def limit(delta):
current_month = now.month
years, hours = divmod(-delta, 8760)
months = divmod(hours, 744)[0]
if months > current_month:
years += 1
month = divmod((current_month - months) + 12, 12)[1]
year = now.year - years
return now.replace(month=month, year=year)
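        # For example, with past=744 (one month of hours, 31*24) and "now" in mid-March,
        # limit(-744) gives months = 744 // 744 = 1, so earliest keeps the same
        # day-of-month and time of day but falls in mid-February of the same year.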
earliest = limits.get("min")
if not earliest:
past = limits.get("past")
if past is not None:
earliest = limit(-past)
latest = limits.get("max")
if not latest:
future = limits.get("future")
if future is not None:
latest = limit(future)
else:
# Compute earliest/latest
earliest = limits.get("min")
if not earliest:
past = limits.get("past")
if past is not None:
earliest = now - datetime.timedelta(hours=past)
latest = limits.get("max")
if not latest:
future = limits.get("future")
if future is not None:
latest = now + datetime.timedelta(hours=future)
# Label
if "label" not in attributes:
attributes["label"] = current.T("Date")
# Widget
set_min = attributes.pop("set_min", None)
set_max = attributes.pop("set_max", None)
date_only = False
if widget == "date":
date_only = True
widget = S3CalendarWidget(calendar = calendar,
timepicker = False,
minimum = earliest,
maximum = latest,
set_min = set_min,
set_max = set_max,
)
elif widget is None or widget == "datetime":
widget = S3CalendarWidget(calendar = calendar,
timepicker = True,
minimum = earliest,
maximum = latest,
set_min = set_min,
set_max = set_max,
)
attributes["widget"] = widget
# Default value
if attributes.get("default") == "now":
attributes["default"] = now
# Representation
represent = attributes.pop("represent", None)
represent_method = None
if represent == "date" or represent is None and date_only:
represent_method = S3DateTime.date_represent
elif represent is None:
represent_method = S3DateTime.datetime_represent
if represent_method:
represent = lambda dt: represent_method(dt,
utc=True,
calendar=calendar,
)
attributes["represent"] = represent
# Validator and empty-option
if "requires" not in attributes:
if date_only:
validator = IS_UTC_DATE
else:
validator = IS_UTC_DATETIME
requires = validator(calendar=calendar,
minimum=earliest,
maximum=latest,
)
empty = attributes.pop("empty", None)
if empty is False:
attributes["requires"] = requires
else:
attributes["requires"] = IS_EMPTY_OR(requires)
f = S3ReusableField(name, "datetime", **attributes)
return f()
# END =========================================================================
|
py | b406a3049b2971409dcc609c774d0b77aed8d2c6 | #!/usr/bin/env python
"""
This module contains some common routines used by other samples.
"""
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
from functools import reduce
import itertools as it
# built-in modules
import os
from contextlib import contextmanager
import cv2 as cv
import numpy as np
image_extensions = [".bmp", ".jpg", ".jpeg", ".png", ".tif", ".tiff", ".pbm", ".pgm", ".ppm"]
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a * a).sum(-1)
def anorm(a):
return np.sqrt(anorm2(a))
def homotrans(H, x, y):
xs = H[0, 0] * x + H[0, 1] * y + H[0, 2]
ys = H[1, 0] * x + H[1, 1] * y + H[1, 2]
s = H[2, 0] * x + H[2, 1] * y + H[2, 2]
return xs / s, ys / s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[cx, 0, tx], [0, cy, ty], [0, 0, 1]])
return M
def lookat(eye, target, up=(0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv.SVDecomp(R - np.eye(3))
p = vt[0] + u[:, 0] * w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, target, s):
x, y = target
cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
# palette data from matplotlib/_cm.py
_jet_data = {
"red": ((0.0, 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1), (1, 0.5, 0.5)),
"green": ((0.0, 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1), (0.91, 0, 0), (1, 0, 0)),
"blue": ((0.0, 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65, 0, 0), (1, 0, 0)),
}
cmap_data = {"jet": _jet_data}
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ["blue", "green", "red"]:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x + eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T * 255)
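# Illustrative usage: make_cmap('jet') returns a 256x3 uint8 BGR lookup table,
# so make_cmap('jet')[gray] maps an 8-bit grayscale image to a false-color image.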
def nothing(*arg, **kw):
pass
def clock():
return cv.getTickCount() / cv.getTickFrequency()
@contextmanager
def Timer(msg):
print(
msg, "...",
)
start = clock()
try:
yield
finally:
print("%.2f ms" % ((clock() - start) * 1000))
class StatValue:
def __init__(self, smooth_coef=0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0 - c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
return
if self.drag_start:
if flags & cv.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1 - x0 > 0 and y1 - y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
args = [iter(iterable)] * n
if PY3:
output = it.zip_longest(fillvalue=fillvalue, *args)
else:
output = it.izip_longest(fillvalue=fillvalue, *args)
return output
def mosaic(w, imgs):
"""Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
"""
imgs = iter(imgs)
if PY3:
img0 = next(imgs)
else:
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color=(0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv.circle(vis, (int(x), int(y)), 2, color)
|
py | b406a46d6097181d7fc672466b03a53844ab102f | #!/usr/bin/env python
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
from __future__ import print_function, unicode_literals
import argparse
import codecs
import difflib
import fnmatch
import io
import errno
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx'
DEFAULT_CLANG_FORMAT_IGNORE = '.clang-format-ignore'
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def excludes_from_file(ignore_file):
excludes = []
try:
with io.open(ignore_file, 'r', encoding='utf-8') as f:
for line in f:
if line.startswith('#'):
# ignore comments
continue
pattern = line.rstrip()
if not pattern:
# allow empty lines
continue
excludes.append(pattern)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return excludes
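# An illustrative .clang-format-ignore file (comment and blank lines are skipped):
#   # vendored sources
#   third_party/*
#   build/*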
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for file in files:
if recursive and os.path.isdir(file):
for dirpath, dnames, fnames in os.walk(file):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
# os.walk() supports trimming down the dnames list
# by modifying it in-place,
# to avoid unnecessary directory listings.
dnames[:] = [
x for x in dnames
if
not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [
x for x in fpaths if not fnmatch.fnmatch(x, pattern)
]
for f in fpaths:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
else:
out.append(file)
return out
def make_diff(file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile='{}\t(original)'.format(file),
tofile='{}\t(reformatted)'.format(file),
n=3))
class DiffError(Exception):
def __init__(self, message, errs=None):
super(DiffError, self).__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super(UnexpectedError, self).__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file):
try:
ret = run_clang_format_diff(args, file)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError('{}: {}: {}'.format(file, e.__class__.__name__,
e), e)
def run_clang_format_diff(args, file):
try:
with io.open(file, 'r', encoding='utf-8') as f:
original = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable]
if args.inplace:
invocation.append("-i")
if args.style is not None:
invocation.append('--style')
invocation.append(args.style)
invocation.append(file)
# Use of utf-8 to decode the process output.
#
# Hopefully, this is the correct thing to do.
#
# It's done due to the following assumptions (which may be incorrect):
    # - clang-format returns the bytes read from the files as-is,
# without conversion, and it is already assumed that the files use utf-8.
# - if the diagnostics were internationalized, they would use utf-8:
# > Adding Translations to Clang
# >
# > Not possible yet!
# > Diagnostic strings should be written in UTF-8,
# > the client can translate to the relevant code page if needed.
# > Each translation completely replaces the format string
# > for the diagnostic.
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
#
# It's not pretty, due to Python 2 & 3 compatibility.
encoding_py3 = {}
if sys.version_info[0] >= 3:
encoding_py3['encoding'] = 'utf-8'
try:
proc = subprocess.Popen(
invocation,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
**encoding_py3)
except OSError as exc:
raise DiffError(
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(invocation), exc
)
)
proc_stdout = proc.stdout
proc_stderr = proc.stderr
if sys.version_info[0] < 3:
# make the pipes compatible with Python 3,
# reading lines should output unicode
encoding = 'utf-8'
proc_stdout = codecs.getreader(encoding)(proc_stdout)
proc_stderr = codecs.getreader(encoding)(proc_stderr)
# hopefully the stderr pipe won't get full and block the process
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError(
"Command '{}' returned non-zero exit status {}".format(
subprocess.list2cmdline(invocation), proc.returncode
),
errs,
)
    # the edited file has to be read back from disk
if args.inplace:
try:
with io.open(file, 'r', encoding='utf-8') as f:
outs = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
return make_diff(file, original, outs), errs
def bold_red(s):
return '\x1b[1m\x1b[31m' + s + '\x1b[0m'
def colorize(diff_lines):
def bold(s):
return '\x1b[1m' + s + '\x1b[0m'
def cyan(s):
return '\x1b[36m' + s + '\x1b[0m'
def green(s):
return '\x1b[32m' + s + '\x1b[0m'
def red(s):
return '\x1b[31m' + s + '\x1b[0m'
for line in diff_lines:
if line[:4] in ['--- ', '+++ ']:
yield bold(line)
elif line.startswith('@@ '):
yield cyan(line)
elif line.startswith('+'):
yield green(line)
elif line.startswith('-'):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
if sys.version_info[0] < 3:
sys.stdout.writelines((l.encode('utf-8') for l in diff_lines))
else:
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = 'error:'
if use_colors:
error_text = bold_red(error_text)
print("{}: {} {}".format(prog, error_text, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--clang-format-executable',
metavar='EXECUTABLE',
help='path to the clang-format executable',
default='clang-format')
parser.add_argument(
'--extensions',
help='comma separated list of file extensions (default: {})'.format(
DEFAULT_EXTENSIONS),
default=DEFAULT_EXTENSIONS)
parser.add_argument(
'-r',
'--recursive',
action='store_true',
help='run recursively over directories')
parser.add_argument('files', metavar='file', nargs='+')
parser.add_argument(
'-q',
'--quiet',
action='store_true',
help="disable output, useful for the exit code")
parser.add_argument(
'-j',
metavar='N',
type=int,
default=0,
help='run N clang-format jobs in parallel'
' (default number of cpus + 1)')
parser.add_argument(
'--color',
default='auto',
choices=['auto', 'always', 'never'],
help='show colored diff (default: auto)')
parser.add_argument(
'-e',
'--exclude',
metavar='PATTERN',
action='append',
default=[],
help='exclude paths matching the given glob-like pattern(s)'
' from recursive search')
parser.add_argument(
'-i',
'--inplace',
action='store_true',
help='correct files in place')
parser.add_argument(
'-s',
'--style',
metavar="STRING_OR_FILE",
action='store',
help='pass file path or style to apply special formatting')
args = parser.parse_args()
# use default signal handling, like diff return SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == 'always':
colored_stdout = True
colored_stderr = True
elif args.color == 'auto':
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
version_invocation = [args.clang_format_executable, str("--version")]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(version_invocation), e
),
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
excludes = excludes_from_file(DEFAULT_CLANG_FORMAT_IGNORE)
excludes.extend(args.exclude)
files = list_files(
args.files,
recursive=args.recursive,
exclude=excludes,
extensions=args.extensions.split(','))
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(
partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
return retcode
if __name__ == '__main__':
sys.exit(main())
|
py | b406a58548043d751fa9b1a1ce457725483cdac1 | # -*- coding: utf-8 -*-
def to_text(path, language='fra'):
"""Wraps Tesseract 4 OCR with custom language model.
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format
"""
import subprocess
from distutils import spawn
import tempfile
import time
# Check for dependencies. Needs Tesseract and Imagemagick installed.
if not spawn.find_executable('tesseract'):
raise EnvironmentError('tesseract not installed.')
if not spawn.find_executable('convert'):
raise EnvironmentError('imagemagick not installed.')
if not spawn.find_executable('gs'):
raise EnvironmentError('ghostscript not installed.')
with tempfile.NamedTemporaryFile(suffix='.tiff') as tf:
# Step 1: Convert to TIFF
gs_cmd = [
'gs',
'-q',
'-dNOPAUSE',
'-r600x600',
'-sDEVICE=tiff24nc',
'-sOutputFile=' + tf.name,
path,
'-c',
'quit',
]
        # Wait for Ghostscript to finish writing the TIFF before post-processing
        subprocess.Popen(gs_cmd).wait()
# Step 2: Enhance TIFF
magick_cmd = [
'convert',
tf.name,
'-colorspace',
'gray',
'-type',
'grayscale',
'-contrast-stretch',
'0',
'-sharpen',
'0x1',
'tiff:-',
]
p1 = subprocess.Popen(magick_cmd, stdout=subprocess.PIPE)
tess_cmd = ['tesseract', '-l', language, '--oem', '1', '--psm', '3', 'stdin', 'stdout']
p2 = subprocess.Popen(tess_cmd, stdin=p1.stdout, stdout=subprocess.PIPE)
out, err = p2.communicate()
extracted_str = out
return extracted_str
|
py | b406a5dc7444fe638510a635c22d06be132956de | from __future__ import absolute_import
from __future__ import print_function
from DeploymentDirector.rules import ActionSettings, ParamValueAssignation, Match
from voluptuous import Schema
import yaml
import pytest
action_settings = ActionSettings.binding()
settings_1 = """
enabled: True
labels:
label1: value1
parameters:
param1: one value
param2: [two, values]
"""
settings_1 = yaml.safe_load(settings_1)
settings_2 = """
parameters:
the_repo: ${repo}
the_branch: ${branch}
the_ref: "${repo}#${branch}"
"""
settings_2 = yaml.safe_load(settings_2)
@pytest.mark.parametrize('settings', [ settings_1, settings_2 ])
def test_action_settings(settings, match):
  def unwind(obj):
    if type(obj) in (list, tuple, set):
      for v in obj:
        for leaf in unwind(v):
          yield leaf
    elif type(obj) == dict:
      for v in obj.values():
        for leaf in unwind(v):
          yield leaf
    else:
      yield obj
x = action_settings(settings)
assert(isinstance(x, ActionSettings))
assert(all([isinstance(p,ParamValueAssignation) for p in x.parameters.values()]))
print(x.parameters)
x = x.resolve(match)
print(x.parameters)
assert(isinstance(x, ActionSettings))
  assert(all([type(p) in (str, bool, int) for p in unwind(list(x.parameters.values()))]))
|
py | b406a648d4aa7db6b5dcd3b75d931e9a16f22e04 |
import os
import sys
import tempfile
from .platform import OnPlatform, Platform
#----------------------------------------------------------------------------------------------
class Runner:
def __init__(self, nop=False):
self.nop = nop
def run(self, cmd, output_on_error=False, _try=False):
print(cmd)
sys.stdout.flush()
if self.nop:
return
if output_on_error:
fd, temppath = tempfile.mkstemp()
os.close(fd)
cmd = "{{ {}; }} >{} 2>&1".format(cmd, temppath)
rc = os.system(cmd)
if rc > 0:
if output_on_error:
os.system("cat {}".format(temppath))
os.remove(temppath)
eprint("command failed: " + cmd)
sys.stderr.flush()
if not _try:
sys.exit(1)
return rc
def has_command(self, cmd):
return os.system("command -v " + cmd + " > /dev/null") == 0
#----------------------------------------------------------------------------------------------
class RepoRefresh(OnPlatform):
def __init__(self, runner):
OnPlatform.__init__(self)
self.runner = runner
def redhat_compat(self):
pass
def debian_compat(self):
self.runner.run("apt-get -qq update -y")
def macosx(self):
self.runner.run("brew update || true")
#----------------------------------------------------------------------------------------------
class Setup(OnPlatform):
def __init__(self, nop=False):
OnPlatform.__init__(self)
self.runner = Runner(nop)
self.stages = [0]
self.platform = Platform()
self.os = self.platform.os
self.dist = self.platform.dist
self.ver = self.platform.os_ver
if self.has_command("python"):
self.python = "python"
elif self.has_command("python2"):
self.python = "python2"
elif self.has_command("python3"):
self.python = "python3"
if self.os == 'macosx':
# this is required because osx pip installed are done with --user
os.environ["PATH"] = os.environ["PATH"] + ':' + '$HOME/Library/Python/2.7/bin'
if self.platform.is_debian_compat():
# prevents apt-get from interactively prompting
os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
os.environ["PYTHONWARNINGS"] = 'ignore:DEPRECATION::pip._internal.cli.base_command'
def setup(self):
RepoRefresh(self.runner).invoke()
self.invoke()
def run(self, cmd, output_on_error=False, _try=False):
return self.runner.run(cmd, output_on_error=output_on_error, _try=_try)
def has_command(self, cmd):
return self.runner.has_command(cmd)
#------------------------------------------------------------------------------------------
def apt_install(self, packs, group=False, _try=False):
self.run("apt-get -qq install -y " + packs, output_on_error=True, _try=_try)
def yum_install(self, packs, group=False, _try=False):
if not group:
self.run("yum install -q -y " + packs, output_on_error=True, _try=_try)
else:
self.run("yum groupinstall -y " + packs, output_on_error=True, _try=_try)
def dnf_install(self, packs, group=False, _try=False):
if not group:
self.run("dnf install -y " + packs, output_on_error=True, _try=_try)
else:
self.run("dnf groupinstall -y " + packs, output_on_error=True, _try=_try)
def zypper_install(self, packs, group=False, _try=False):
self.run("zipper --non-interactive install " + packs, output_on_error=True, _try=_try)
def pacman_install(self, packs, group=False, _try=False):
self.run("pacman --noconfirm -S " + packs, output_on_error=True, _try=_try)
def brew_install(self, packs, group=False, _try=False):
# brew will fail if package is already installed
for pack in packs.split():
self.run("brew list {} &>/dev/null || brew install {}".format(pack, pack), output_on_error=True, _try=_try)
def install(self, packs, group=False, _try=False):
if self.os == 'linux':
if self.dist == 'fedora':
self.dnf_install(packs, group=group, _try=_try)
elif self.dist == 'ubuntu' or self.dist == 'debian':
self.apt_install(packs, group=group, _try=_try)
elif self.dist == 'centos' or self.dist == 'redhat':
self.yum_install(packs, group=group, _try=_try)
elif self.dist == 'suse':
self.zypper_install(packs, group=group, _try=_try)
elif self.dist == 'arch':
self.pacman_install(packs, group=group, _try=_try)
else:
Assert(False), "Cannot determine installer"
elif self.os == 'macosx':
self.brew_install(packs, group=group, _try=_try)
else:
Assert(False), "Cannot determine installer"
def group_install(self, packs):
self.install(packs, group=True)
#------------------------------------------------------------------------------------------
def yum_add_repo(self, repourl, repo=""):
if not self.has_command("yum-config-manager"):
self.install("yum-utils")
self.run("yum-config-manager -y --add-repo {}".format(repourl))
def apt_add_repo(self, repourl, repo=""):
if not self.has_command("yum-config-manager"):
self.install("software-properties-common")
self.run("add-apt-repository -y {}".format(repourl))
self.run("apt-get -qq update")
def dnf_add_repo(self, repourl, repo=""):
if self.run("dnf config-manager 2>/dev/null", _try=True):
self.install("dnf-plugins-core")
self.run("dnf config-manager -y --add-repo {}".format(repourl))
def zypper_add_repo(self, repourl, repo=""):
pass
def pacman_add_repo(self, repourl, repo=""):
pass
def brew_add_repo(self, repourl, repo=""):
pass
def add_repo(self, repourl, repo=""):
if self.os == 'linux':
if self.dist == 'fedora':
self.dnf_add_repo(repourl, repo=repo)
elif self.dist == 'ubuntu' or self.dist == 'debian':
self.apt_add_repo(repourl, repo=repo)
elif self.dist == 'centos' or self.dist == 'redhat':
self.yum_add_repo(repourl, repo=repo)
elif self.dist == 'suse':
self.zypper_add_repo(repourl, repo=repo)
elif self.dist == 'arch':
self.pacman_add_repo(repourl, repo=repo)
else:
Assert(False), "Cannot determine installer"
elif self.os == 'macosx':
self.brew_add_repo(packs, group=group, _try=_try)
else:
Assert(False), "Cannot determine installer"
#------------------------------------------------------------------------------------------
def pip_install(self, cmd, _try=False):
pip_user = ''
if self.os == 'macosx':
pip_user = '--user '
self.run("pip install --disable-pip-version-check " + pip_user + cmd, output_on_error=True, _try=_try)
def pip3_install(self, cmd, _try=False):
pip_user = ''
if self.os == 'macosx':
pip_user = '--user '
self.run("pip3 install --disable-pip-version-check " + pip_user + cmd, output_on_error=True, _try=_try)
def setup_pip(self):
get_pip = "set -e; wget -q https://bootstrap.pypa.io/get-pip.py -O /tmp/get-pip.py"
if not self.has_command("pip"):
# self.install("python3-distutils")
self.install_downloaders()
self.run(get_pip + "; " + self.python + " /tmp/get-pip.py", output_on_error=True)
def install_downloaders(self):
if self.os == 'linux':
self.install("ca-certificates")
self.install("curl wget")
def install_git_lfs_on_linux(self):
self.run("curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash")
self.install("git-lfs")
|
py | b406a65f13e4e24796821fac7e045bd18d44b9ae | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 12); |
py | b406a6b28e24b01043619519f0a60d56ea216ee3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iom', '0028_auto_20151204_1407'),
]
operations = [
migrations.AlterModelOptions(
name='alias',
options={'verbose_name_plural': 'Aliassen'},
),
]
|
py | b406a78a060027349f1d033b5392cb64bc888107 | # Copyright (c) 2012-2021 Esri R&D Center Zurich
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A copy of the license is available in the repository's LICENSE file.
import sys
import os
import pyprt
from pyprt.pyprt_utils import visualize_prt_results
CS_FOLDER = os.path.dirname(os.path.realpath(__file__))
def asset_file(filename):
return os.path.join(CS_FOLDER, 'data', filename)
# PRT Initialization
print('\nInitializing PRT.')
pyprt.initialize_prt()
if not pyprt.is_prt_initialized():
raise Exception('PRT is not initialized')
# Data
rpk = asset_file('candler.rpk')
attrs = {}
# Initial Shapes
shape_geometry_1 = pyprt.InitialShape(
[0, 0, 0, 0, 0, 100, 100, 0, 100, 100, 0, 0])
shape_geometry_2 = pyprt.InitialShape(
[0, 0, 0, 0, 0, -10, -10, 0, -10, -10, 0, 0, -5, 0, -5])
# PRT Generation
m = pyprt.ModelGenerator([shape_geometry_2, shape_geometry_1])
encoder_options = {'outputPath': '/tmp/pyprt_output'}
os.makedirs(encoder_options['outputPath'], exist_ok=True)
models = m.generate_model(
[attrs], rpk, 'com.esri.prt.codecs.OBJEncoder', encoder_options)
print('\nGenerated models located in '+encoder_options['outputPath'])
# PRT End
pyprt.shutdown_prt()
print('\nShutdown PRT.')
|
py | b406a7dd2aa2ee796a0e7feeb736f2400ba61128 | import os
import numpy as np
from PIL import Image
from common_utils import save_as_hdf5
POSTFIX = ['.png','.jpg','.JPG','.Jpg','.jpeg','.bmp','.BMP','.tif']
DIM = (512,512)
def postfix_search(input_path):
'''
    Depth-first search that collects the set of file extensions under a directory,
    which helps decide how to convert the raw data.
'''
postfix = set()
if os.path.isdir(input_path):
entry_iterator = os.scandir(input_path)
for item in entry_iterator:
if item.is_dir():
postfix = postfix.union(postfix_search(item.path))
else:
postfix.add(os.path.splitext(item.name)[1])
return postfix
def convert_to_npy(input_path,save_path):
'''
    Convert the raw data (e.g. JPG) to a numpy array and save it as HDF5.
    Basic process operations:
    - normalization: [0,1]
    - resize: (512,512)
    - stack: single slices into a 3D volume
'''
ID = []
if not os.path.exists(save_path):
os.makedirs(save_path)
if os.path.isdir(input_path):
item_list = os.listdir(input_path)
if len(item_list) > 0:
if os.path.isfile(os.path.join(input_path,item_list[0])):
patient_id = os.path.basename(input_path)
ID.append(patient_id)
hdf5_path = os.path.join(save_path,"%s.hdf5" % patient_id)
try:
# convert image to numpy array with fixed 2d-dim: DIM(512,512)
item_list.sort(key=lambda x:int(x.split('.')[0]))
img_list = [img_reader(os.path.join(input_path,item),DIM) for item in item_list]
img_array = np.stack(img_list,axis=0) # (z,x,y)
# save as hdf5, key='img'
save_as_hdf5(img_array,hdf5_path,'img')
except:
print(input_path)
pass
else:
for item in item_list:
ID.extend(convert_to_npy(os.path.join(input_path,item),save_path))
return ID
def img_reader(input_path,dim):
'''
Image file reader, return image array.
Other operation:
- resize: fixed dim
- normalize: [0,1]
Args:
- input path: file path
- dim: a tuple of 2 integers
'''
# graylevel mode
img = Image.open(input_path).convert('L')
# resize if need, mode=Image.NEAREST
if img.size != dim:
img = img.resize(dim,Image.NEAREST)
# convert to numpy array, data type = np.float32
img_array = np.asarray(img,dtype=np.float32)
# normalize:[0,255] -> [0.0,1.0]
img_array = img_array / 255.0
return img_array
if __name__ == "__main__":
'''
# Part-1:search all file postfixes for converting
input_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/raw_data/Normal'
postfix = postfix_search(input_path)
print(postfix)
'''
# Part-2:convert image to numpy array and save as hdf5
input_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/raw_data/CP'
save_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/npy_data/CP'
patient_id = convert_to_npy(input_path,save_path)
print("CP %d samples done"%len(patient_id))
input_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/raw_data/NCP'
save_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/npy_data/NCP'
patient_id = convert_to_npy(input_path,save_path)
print("NCP %d samples done"%len(patient_id))
input_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/raw_data/Normal'
save_path = '/staff/shijun/torch_projects/COVID-19_CLS/dataset/npy_data/Normal'
patient_id = convert_to_npy(input_path,save_path)
print("Normal %d samples done"%len(patient_id))
|
py | b406a85fe5f3c33054c3b005d9d13bbe11eaa3c1 | from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import time
import os
from six.moves import cPickle
from utils import TextLoader
from model import Model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='save',
help='model directory to load stored checkpointed models from')
parser.add_argument('-n', type=int, default=250,
help='number of words to sample')
parser.add_argument('--prime', type=str, default='gig',
help='prime text')
parser.add_argument('--pick', type=int, default=1,
help='1 = weighted pick, 2 = beam search pick')
parser.add_argument('--width', type=int, default=4,
help='width of the beam search')
parser.add_argument('--sample', type=int, default=1,
help='0 to use max at each timestep, 1 to sample at each timestep, 2 to sample on spaces')
parser.add_argument('--count', '-c', type=int, default=1,
help='number of samples to print')
parser.add_argument('--quiet', '-q', default=False, action='store_true',
help='suppress printing the prime text (default false)')
args = parser.parse_args()
sample(args)
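# Example invocation (script name and checkpoint directory are illustrative):
#   python sample.py --save_dir save -n 400 --prime "the " --pick 2 --width 4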
def sample(args):
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = cPickle.load(f)
with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:
words, vocab = cPickle.load(f)
model = Model(saved_args, True)
with tf.Session() as sess:
tf.global_variables_initializer().run()
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
for _ in range(args.count):
print(model.sample(sess, words, vocab, args.n, args.prime, args.sample, args.pick, args.width, args.quiet))
if __name__ == '__main__':
main()
|
py | b406a9364fd615beb91b42aa48308d172afbcfc9 | #!/usr/bin/env python3
import os, sys, time, re, string
import datetime, pytz
import subprocess
import shutil
from enum import Enum
import platform
import threading
import yaml
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import google.cloud.storage
import google.cloud.logging
import logging
from tkbuild.job import TKBuildJob, TKWorkstepDef, JobStatus
from tkbuild.project import TKBuildProject
from tkbuild.artifact import TKArtifact
# TKBUILD TODO
# - Add build tags to builds to filter agents (e.g. win32, dev)
# - Figure out "voting" or transaction based write for firebase to ensure only one agent runs a job
# - Figure out reliable way to stop/resume build agent on mac
class TKBuildAgentConfig(object):
def __init__(self, agentConfigFile):
self.googleCredentialFile = "MISSING"
self.googleProjectId = "my-projectid-00000"
self.projectConfigs = []
print("Initializing build agent from config ", agentConfigFile)
if not os.path.exists(agentConfigFile):
print("WARN: agent config file doesn't exist or can't be read:", agentConfigFile)
else:
with open(agentConfigFile) as fpconfig:
docs = yaml.full_load(fpconfig)
agentCfg = docs.get("build-agent")
print( agentCfg['name'] )
print( agentCfg['desc'] )
for projectCfgDoc in docs['projects']:
projectCfgData = projectCfgDoc['project']
                    project = TKBuildProject.createFromConfig( projectCfgData )
                    self.projectConfigs.append( project )
# An agent is a script that runs builds on a worker machine. One agent
# may have multiple projects
class TKBuildAgent(object):
def __init__(self ):
self.name = "unnamed-build-agent"
self.desc = "TkBuild Build Agent"
self.tkbuildDir = "/tmp/tkbuild/"
self.googleCredentialFile = "MISSING"
self.googleProjectId = "my-projectid-00000"
self.dryRun = False
self.projects = {}
self.platform = platform.system() # e.g. "Darwin"
self.serverDone = False
self.updCount = 0 # mostly for debugging
self.storage_client = None
self.db = None
self.jobList = [] # Will be populated from the jobs callback
# Set when something has changed and the worker should
# run an update
self.changeEvent = threading.Event()
# This is the currently running job.
self.currentJob = None
@classmethod
def createFromConfig( cls, configData, tkBuildDir ):
agentCfg = configData.get("build-agent")
agent = cls()
agent.name = agentCfg.get( 'name', agent.name )
agent.desc = agentCfg.get( 'desc', agent.desc )
agent.tkbuildDir = tkBuildDir
gcloudConfig = agentCfg.get('gcloud', {} )
agent.googleCredentialFile = gcloudConfig.get( 'credfile', agent.googleCredentialFile )
agent.googleProjectId = gcloudConfig.get( 'project-id', agent.googleProjectId )
for projectCfgDoc in configData.get('projects'):
projectCfgData = projectCfgDoc['project']
project = TKBuildProject.createFromConfig( projectCfgData )
if project:
agent.projects[project.projectId] = project
return agent
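    # A minimal config sketch for createFromConfig (names and paths are illustrative):
    #   build-agent:
    #     name: example-build-agent
    #     desc: Example build agent
    #     gcloud:
    #       credfile: /path/to/service-account.json
    #       project-id: my-projectid-00000
    #   projects:
    #     - project:
    #         # project fields handled by TKBuildProject.createFromConfig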
def commitJobChanges( self, job):
job_ref = self.jobs_ref.document( job.jobKey )
jobData = job.toFirebaseDict()
job_ref.set( jobData )
def updateJobsList(self, jobs_ref ):
newJobsList = []
for jobRef in jobs_ref:
proj = self.projects[ jobRef.get('projectId') ]
job = TKBuildJob.createFromFirebaseDict( proj, jobRef.id, jobRef )
newJobsList.append( job )
# TODO; wrap log_struct with something that can log to console too
#self.logger.log_struct({ 'jobkey' : job.jobKey, 'worksteps' : job.worksteps } )
self.jobList = newJobsList
logging.info( f"Updated jobs list (length {len(self.jobList)}).")
def onJobsListChanged( self, jobs, changes, read_time):
#print( "On jobslist changed: ", jobs )
logging.info( "Job list changed:")
self.updateJobsList( jobs )
# alert the main build that we might need to do some work
self.changeEvent.set()
@classmethod
def createFromConfigFile(cls, agentConfigFile ):
# Use where the agent Cfg is located as the default for the build dir
defaultTkBuildDir = os.path.split( agentConfigFile )[0]
if not os.path.exists(agentConfigFile):
logging.warning("WARN: agent config file doesn't exist or can't be read:", agentConfigFile)
else:
with open(agentConfigFile) as fpconfig:
configData = yaml.full_load(fpconfig)
return cls.createFromConfig( configData, defaultTkBuildDir )
def orderedProjects(self):
projects = list( self.projects.values() )
projects.sort( key=lambda pp: pp.sortKey )
return projects
def serverMainloop(self, db ):
self.db = db
# Set the jobs changed callback
self.jobs_ref = db.collection(u'jobs')
query_watch = self.jobs_ref.on_snapshot( self.onJobsListChanged )
#self.updateJobsList(jobs_ref.get() )
# for doc in jobs_ref.get():
# print(f'{doc.id} => {doc.to_dict()}')
# Make some test jobs
# testJob = TKBuildJob("testrepo")
# testJob.commitVer = "f5c86435acd0af16561eeaaa74225d4b93829115"
# testJob.worksteps = {"fetch": JobStatus.TODO,
# "build": JobStatus.TODO }
# testJob = TKBuildJob("tkwordlist")
# testJob.commitVer = "05350960499b752bc13dd56144d6be8632ad82ca"
# testJob.worksteps = {"fetch": JobStatus.TODO,
# "build": JobStatus.TODO}
#
# print(f"Testjob: {testJob}")
# testJobRef = db.collection(u'jobs').document()
# testJobRef.set(testJob.toFirebaseDict())
# Run the mainloop
while not self.serverDone:
print("update ...")
self.serverUpdate()
self.changeEvent.wait( 60.0 ) # TODO: make timeout time an option
self.changeEvent.clear()
def serverUpdate(self):
logging.info( f"Agent update ... {self.updCount}")
self.updCount += 1
print( f" {len(self.jobList)} avail jobs:")
# Check if there are any obsolete jobs, and delete them
self.cleanupObsoleteJobs()
# Check if there are any jobdirs that do not exist in the job list. If so, clean up those job dirs.
self.cleanupOldJobDirs()
# Check if there are jobs we can do
for job in self.jobList:
proj = self.projects[job.projectId]
# Ignore jobs marked "RUN" ... this might be running on another node (todo) but
# probably is just stale because firebase updates are not instant.
if JobStatus.RUN in job.worksteps.values():
logging.warning("Job is marked RUN?? but we're not running it.")
#sys.exit(1)
continue
# If the job has work left to do
if job.hasWorkRemaining( proj.workstepNames ):
print("job ", job, "has work left...")
self.currentJob = job
break
else:
print( "No work remains", job, job.worksteps )
# Did we find a job to run?
if self.currentJob == None:
logging.info("No matching jobs found to run.")
else:
# run the job
self.runNextJobStep( self.currentJob )
# clear the current job
self.currentJob = None
# Check if there are any jobs with todo worksteps that match the project and platform for this agent.
# (todo: sort/priority for these) If so:
# - Change workstep status to “running”
# - Do the workstep (call $PROJECT_DIR/workdir/$REPO_NAME/tkbuild workstep)
# - Change the workstep status to “Completed” or “Failed”
def cleanupOldProjectJobs(self, proj, projJobExpireDate ):
print("cleanupOldProjectJobs", proj.projectId, len(self.jobList) )
for job in self.jobList:
if (job.projectId==proj.projectId) and (job.timestamp < projJobExpireDate):
self.db.collection(u'jobs').document(job.jobKey).delete()
def cleanupObsoleteJobs(self):
if len(self.jobList)==0:
return
for proj in self.projects.values():
projJobExpireDate = datetime.datetime.now( tz=pytz.UTC ) - datetime.timedelta( minutes=proj.jobDeleteAge )
print(f"Project {proj.projectId} will expire jobs before {projJobExpireDate}")
self.cleanupOldProjectJobs( proj, projJobExpireDate )
def cleanupOldJobDirs(self ):
# Make a list of the jobkeys we have for easy lookup
haveJobKeys = set()
for job in self.jobList:
haveJobKeys.add( job.jobKey )
# Look in the project workdir for any jobdirs that
# match the pattern for a jobdir
for proj in self.projects.values():
for dir in os.listdir( proj.workDir ):
dsplit = dir.split( "_" )
if len (dsplit) != 2:
continue
dirProj, jobKey = dsplit
if dirProj != proj.projectId:
continue
if len(jobKey) != 20:
continue
# At this point we are pretty sure this is a work dir, and
# can infer the jobkey from the workdir
if jobKey in haveJobKeys:
print ("Nope this is an active job")
continue
# Also look for other dirs listed in cleanupDirs
workDir = os.path.join( proj.workDir, dir )
cleanupDirs = [ workDir ]
workname = proj.projectId + "_" + jobKey
for extraDir in proj.cleanupDirs:
dir2 = self.replacePathVars2( extraDir, workDir, proj, None, workname )
# Make sure there are no unexpanded vars, kind of a hack but
if dir2.find("$")==-1:
cleanupDirs.append( dir2 )
for cleanDir in cleanupDirs:
if os.path.exists( cleanDir ):
logging.info( f"Cleaning up old workdir {cleanDir}" )
shutil.rmtree( cleanDir )
def failJob(self, job, wsdefFailed ):
logging.error( f"Job {job.jobKey}:{wsdefFailed.stepname} failed.")
# Go through the worksteps until we find the one that failed.
# Mark it as failed, and any subsequent ones as cancelled
proj = self.projects[job.projectId]
foundFailedStep = False
for wsdef in proj.workstepDefs:
if not foundFailedStep and wsdef.stepname == wsdefFailed.stepname:
foundFailedStep = True
job.setWorkstepStatus( wsdef.stepname, JobStatus.FAIL )
elif foundFailedStep and job.worksteps[ wsdef.stepname ] == JobStatus.TODO:
job.setWorkstepStatus(wsdef.stepname, JobStatus.CANCEL)
self.commitJobChanges( job )
def archiveLog(self, job, wsdef, workstepLog ):
proj = self.projects[job.projectId]
# Check that we're configured to publish stuff
if not proj.bucketName:
logging.warning("archiveLog: No bucketName set in project, can't archive log.")
return False
# Make sure the file exists
if not os.path.exists(workstepLog):
logging.warning( f"archiveLog: Workstep log file {workstepLog} does not exist." )
return False
else:
logging.info(f"Archiving {workstepLog} to bucket {proj.bucketName}")
if self.storage_client is None:
self.storage_client = google.cloud.storage.Client()
logFilename = os.path.split(workstepLog)[-1]
bucket = self.storage_client.bucket(proj.bucketName)
blobName = os.path.join(proj.projectId, job.jobKey, "logs", logFilename)
blob = bucket.blob(blobName)
result = blob.upload_from_filename(workstepLog, content_type="text/plain;charset=UTF-8")
logArchiveUrl = f"https://{bucket.name}.storage.googleapis.com/{blob.name}"
logging.info(f"Result of upload is {logArchiveUrl}")
def replacePathVars(self, origPath, workdirRepoPath, proj, job ):
return self.replacePathVars2( origPath, workdirRepoPath, proj, job, job.jobDirShort )
def replacePathVars2(self, origPath, workdirRepoPath, proj, job, workname ):
vars = {
"TKBUILD" : self.tkbuildDir,
"WORKDIR" : workdirRepoPath,
"PROJWORKDIR" : proj.workDir,
"WORKNAME": workname,
}
if job:
vars.update( {
"COMMIT": job.commitVer,
"VERSION": job.version,
"BUILDNUM": str(job.buildNum)
})
result = origPath
for varname, value in vars.items():
varstr = "$" + varname
if result.find( varstr ) != -1:
result = result.replace( varstr, value )
# result = origPath.replace("$TKBUILD", self.tkbuildDir)
# result = result.replace("$WORKDIR", workdirRepoPath)
return result
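    # e.g. an illustrative input of "$PROJWORKDIR/$WORKNAME/build-$BUILDNUM.log" expands to
    # "<proj.workDir>/<workname>/build-<job.buildNum>.log" for the current job.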
def publishArtifact( self, proj, job, wsdef, workdirRepoPath ):
# Check that we're configured to publish stuff
if not proj.bucketName:
logging.warning("publishArtifact: No bucketName set in project, can't publish.")
return False
# Make sure the file exists
artifactFile = wsdef.artifact
artifactFile = self.replacePathVars( artifactFile, workdirRepoPath, proj, job )
if not os.path.exists( artifactFile ):
failMsg = f"Artifact file {artifactFile} does not exist."
logging.warning( failMsg )
job.lastError = failMsg
return False
else:
logging.info( f"Publishing {artifactFile} to bucket {proj.bucketName}")
if self.storage_client is None:
self.storage_client = google.cloud.storage.Client()
artifactFileName = os.path.split( artifactFile )[-1]
bucket = self.storage_client.bucket( proj.bucketName )
blobName = os.path.join( proj.projectId, job.jobKey, artifactFileName)
blob = bucket.blob( blobName )
result = blob.upload_from_filename( artifactFile )
artifactUrl = f"https://storage.googleapis.com/{bucket.name}/{blob.name}"
logging.info( f"Result of upload is {artifactUrl}")
# Make an artifact entry in the DB
artifact = TKArtifact()
artifact.project = proj.projectId
artifact.commitVer = job.commitVer
artifact.jobKey = job.jobKey
artifact.builtfile = artifactUrl
# If the artifact has a manifestBundleId, make a manifest for it
if proj.manifestBundleId:
artifact.addManifestInfo( proj.manifestAppTitle, proj.manifestBundleId, job.version, job.buildNum, artifactUrl )
# maybe want to make this more configurable
manifestName = f"{proj.projectId}_manifest_{job.version}_build_{job.buildNum}.plist"
manifestBlobName = os.path.join( proj.projectId, job.jobKey, manifestName)
manifestBlob = bucket.blob( manifestBlobName )
result = manifestBlob.upload_from_string( artifact.generateManifestFile())
manifestUrl = f"https://storage.googleapis.com/{bucket.name}/{manifestBlob.name}"
artifact.manifest['manifestURL'] = manifestUrl
logging.info( f"Uploaded IOS manifest to {manifestUrl}" )
pubArtifactRef = self.db.collection(u'artifacts').document()
pubArtifactRef.set( artifact.toFirebaseDict() )
logging.info( f"Added artifact with ref {pubArtifactRef.id}")
return True
def peekVersion( self, job, versionFile ):
if not os.path.exists( versionFile ):
logging.warning( f"Version file {versionFile} does not exist.")
return
with open( versionFile ) as fp:
verLine = fp.readline().strip()
if verLine:
job.version = verLine
logging.info( f"PeekVersion: Version is {job.version}" )
def runNextJobStep(self, job ):
logging.info("Run next job step....")
# Go through the worksteps defined for this project and
# do the next one that needs to be done for this job
proj = self.projects[ job.projectId ]
for wsdef in proj.workstepDefs:
if ((wsdef.stepname in job.worksteps) and
(job.worksteps[wsdef.stepname] == JobStatus.TODO)):
# Mark this workstep as running
job.setWorkstepStatus(wsdef.stepname, JobStatus.RUN)
self.commitJobChanges( job )
# Open a logfile for this workstep
workstepLog = os.path.join( proj.workDir, "logs", job.jobDirShort + "_" + wsdef.stepname )
logPath = os.path.split( workstepLog )[0]
os.makedirs( logPath, exist_ok=True )
with open( workstepLog, "wt") as fpLog:
fpLog.write( f"WORKSTEP: {wsdef.stepname}\n" )
# Extra magic for 'fetch' and 'build' for now
if wsdef.stepname == 'fetch':
if not self.workstepFetch( job, wsdef, fpLog ):
logging.warning("fetch workstep FAILED.")
# The fetch failed for some reason, fail the workstep
self.failJob( job, wsdef )
break
else:
logging.info("fetch succeeded, marking as DONE")
job.setWorkstepStatus(wsdef.stepname, JobStatus.DONE)
self.commitJobChanges(job)
elif wsdef.stepname == 'build':
job.buildNum = proj.incrementBuildNumber( job.jobKey, self.db )
# Common workstep steps
logging.info( f"Will do job step {wsdef.stepname}" )
workdirRepoPath = os.path.join(proj.workDir, job.jobDirShort)
if wsdef.cmd:
# Fixme: allow array args or something to handle spaces in args
stepCmd = []
for stepCmdSplit in wsdef.cmd.split():
#print ("SPLIT", stepCmdSplit)
# Replace the project dirs
stepCmdSplit = self.replacePathVars( stepCmdSplit, workdirRepoPath, proj, job )
stepCmd.append( stepCmdSplit )
print("step command is ", stepCmd )
result, cmdTime = self.echoRunCommand( stepCmd, fpLog, self, job )
elif wsdef.stepname != 'fetch':
# Fetch might not have a cmd, but other steps probably will
logging.warning(f"Workstep {job.projectId}:{wsdef.stepname} has no cmd defined.")
result = 0 # treat as done
cmdTime = datetime.timedelta(0)
if result == 0:
# Did succeed?
logging.info(f"Workstep {job.projectId}:{wsdef.stepname} completed success.")
# If this workstep generates a version number, retrieve it now
if wsdef.peekVersion:
versionFile = self.replacePathVars( wsdef.peekVersion, workdirRepoPath, proj, job )
self.peekVersion( job, versionFile )
# And set the status to done
job.setWorkstepStatus(wsdef.stepname, JobStatus.DONE )
self.commitJobChanges(job)
# If this workstep made an artifact that should get published, do so
logging.info( f"wsdef artifact is {wsdef.artifact}")
if wsdef.artifact:
if not self.publishArtifact( proj, job, wsdef, workdirRepoPath ):
self.failJob( job, wsdef )
else:
# Step failed, fail the whole job :_(
self.failJob( job, wsdef )
# Workstep finished, archive the log file
self.archiveLog(job, wsdef, workstepLog)
# we did one workstep here, so don't keep looking for available ones. We'll
# get the next one the next time through the loop
break
def makePristineRepoPath(self, proj ):
pristineRepoPath = os.path.join(proj.projectDir, proj.projectId + "_pristine")
return pristineRepoPath
def getRecentCommits(self, proj ):
pristineRepoPath = self.makePristineRepoPath( proj)
if not os.path.exists(pristineRepoPath):
# Don't implicitly pull the repo here
return []
gitCmd = ["git",
"-C", pristineRepoPath,
"log", "--oneline", "--no-decorate", "-n","20"
]
print( "gitCmd is ", gitCmd )
# PIPE nonsense does capture_output in py3.6
#result = subprocess.run( gitCmd, capture_output=True )
result = subprocess.run( gitCmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
if result.returncode:
return [ "ERROR in git log" ]
else:
commitList = []
for line in result.stdout.decode("utf-8").split("\n"):
if line:
commitList.append( line )
return commitList
def updatePristineRepo( self, proj, wsdef, fpLog ):
# see if the "pristine repo" exists
pristineRepoPath = self.makePristineRepoPath( proj )
if not os.path.exists( pristineRepoPath ):
logging.info(f"Cloning pristine repo {pristineRepoPath}")
gitCmd = [ "git", "clone", wsdef.repoUrl, pristineRepoPath ]
retVal, cmdTime = self.echoRunCommand( gitCmd, fpLog )
else:
logging.info(f"Pristine repo exists at {pristineRepoPath}")
# Bring the pristine repo up to date with remote main
gitPullCmd = [ "git", "-C", pristineRepoPath,
"pull" ]
retVal, cmdTime = self.echoRunCommand(gitPullCmd, fpLog )
if retVal:
return None
return pristineRepoPath
# I don't like this workstep being hardcoded in the agent but not sure exactly
# how I want it to look so I'm putting it here for now.
def workstepFetch(self, job, wsdef, fpLog ):
proj = self.projects[job.projectId]
pristineRepoPath = self.updatePristineRepo( proj, wsdef, fpLog )
if not pristineRepoPath:
return False
# Now clone the pristine repo into the work dir
workdirRepoPath = os.path.join( proj.workDir, job.jobDirShort )
if os.path.exists( workdirRepoPath ):
# Might make this a fatal error later, or nuke and re-copy this dir, but for
# now we'll allow this to make debugging easier.
logging.warning( f"Workdir repo {workdirRepoPath} already exists, using that.")
else:
gitCloneCommand = [ "git", "clone", pristineRepoPath, workdirRepoPath ]
retVal, cmdTime = self.echoRunCommand(gitCloneCommand, fpLog)
if retVal:
return False
# Now bring the workdir copy of the repo up to date with what we're
# trying to build
gitCheckoutCommand = [ "git", "-C", workdirRepoPath,
"checkout", job.commitVer ]
retVal, cmdTime = self.echoRunCommand( gitCheckoutCommand, fpLog )
if retVal:
return False
return True
def _runCommandInternal( self, process):
while True:
line = process.stdout.readline().rstrip()
if not line:
break
yield line
def echoRunCommand( self, command, fpLog, agent = None, job = None ):
"""returns ( returnValue, timeTaken) """
cmdStr = " ".join(command)
if fpLog:
fpLog.write( "CMD: " + cmdStr + "\n")
fpLog.flush()
logging.info(cmdStr)
if (self.dryRun):
return (0, datetime.timedelta(0))
startTime = datetime.datetime.now()
# FIXME: handle STDERR separately, but python makes this hard
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) # , shell=True)
while True:
for linebytes in self._runCommandInternal(process):
line = linebytes.decode("utf-8")
isError = False
isWarn = False
# FIXME: Better parsing here, also make it tool-aware
if line.find( "fatal:") >= 0 or line.find( "error:" ) >= 0:
isError = True
elif line.find("warning:") >= 0:
isWarn = True
if isError:
logging.error(line)
if fpLog:
fpLog.write( "ERROR: "+ line + "\n")
fpLog.flush()
if job:
job.countError( line )
elif isWarn:
logging.warning(line)
if fpLog:
fpLog.write("WARN: " + line + "\n")
fpLog.flush()
if job:
job.countWarning()
else:
logging.info( line )
if fpLog:
fpLog.write( line + "\n")
fpLog.flush()
if (isError or isWarn) and (agent and job):
agent.commitJobChanges( job )
# Is the subprocess done?
if process.poll() is not None:
break
endTime = datetime.datetime.now()
cmdDuration = endTime - startTime
cmdStatus = f"Finished with retval {process.returncode} time taken {cmdDuration}";
logging.info( cmdStatus )
if fpLog:
fpLog.write( cmdStatus + "\n\n\n" )
fpLog.flush()
return (process.returncode, cmdDuration)
|
bzl | b406a946d3570f720b32024cf559476a7f9dbe75 | # In both open-source and fbcode builds, these are generated into
# torch/csrc/{autgrad,jit}/generated.i
GENERATED_CPP = [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
"autograd/generated/python_functions.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions.cpp",
"autograd/generated/python_variable_methods.cpp",
]
# NVFuser runtime library
libtorch_nvfuser_runtime_sources = [
"torch/csrc/jit/codegen/cuda/runtime/block_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/fp16_support.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/helpers.cu",
"torch/csrc/jit/codegen/cuda/runtime/random_numbers.cu",
"torch/csrc/jit/codegen/cuda/runtime/tensor.cu",
"aten/src/ATen/cuda/detail/PhiloxCudaStateRaw.cuh",
"aten/src/ATen/cuda/detail/UnpackRaw.cuh",
]
libtorch_nvfuser_generated_headers = ["{}.h".format(name.split("/")[-1].split(".")[0]) for name in libtorch_nvfuser_runtime_sources]
def libtorch_generated_sources(gencode_pattern):
return [gencode_pattern.format(name) for name in [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
]]
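# Sketch of how the pattern is expanded: with the default gencode_pattern
# ":generate-code[{}]" used by libtorch_sources() below, the first entry above
# becomes the label ":generate-code[autograd/generated/Functions.cpp]".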
# copied from https://github.com/pytorch/pytorch/blob/f99a693cd9ff7a9b5fdc71357dac66b8192786d3/aten/src/ATen/core/CMakeLists.txt
jit_core_headers = [
"torch/csrc/utils/memory.h",
"torch/csrc/WindowsTorchApiMacro.h",
"torch/csrc/jit/frontend/source_range.h",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.h",
"torch/csrc/jit/serialization/source_range_serialization.h",
"torch/csrc/jit/frontend/lexer.h",
"torch/csrc/jit/frontend/strtod.h",
"torch/csrc/jit/frontend/parser_constants.h",
"torch/csrc/jit/frontend/function_schema_parser.h",
"torch/csrc/jit/frontend/parse_string_literal.h",
"torch/csrc/jit/frontend/schema_type_parser.h",
"torch/csrc/jit/frontend/error_report.h",
"torch/csrc/jit/frontend/tree.h",
"torch/custom_class.h",
"torch/custom_class_detail.h",
"torch/library.h",
]
jit_core_sources = [
"torch/csrc/jit/frontend/error_report.cpp",
"torch/csrc/jit/frontend/function_schema_parser.cpp",
"torch/csrc/jit/frontend/lexer.cpp",
"torch/csrc/jit/frontend/schema_type_parser.cpp",
"torch/csrc/jit/frontend/strtod.cpp",
"torch/csrc/jit/frontend/source_range.cpp",
]
# copied from https://github.com/pytorch/pytorch/blob/0bde610c14b92d351b968a0228df29e92442b1cc/torch/CMakeLists.txt
# There are some common files used in both the internal lite interpreter and the full JIT. They are kept in
# a separate list for the shared files.
core_sources_common = [
"torch/csrc/autograd/profiler_legacy.cpp",
"torch/csrc/autograd/profiler_kineto.cpp",
"torch/csrc/autograd/profiler_utils.cpp",
"torch/csrc/autograd/autograd_meta.cpp",
"torch/csrc/autograd/forward_grad.cpp",
"torch/csrc/jit/frontend/edit_distance.cpp",
"torch/csrc/jit/frontend/string_to_type.cpp",
"torch/csrc/jit/mobile/type_parser.cpp",
"torch/csrc/jit/mobile/runtime_compatibility.cpp",
"torch/csrc/jit/runtime/instruction.cpp",
"torch/csrc/jit/runtime/jit_exception.cpp",
"torch/csrc/jit/runtime/operator.cpp",
"torch/csrc/jit/runtime/print_handler.cpp",
"torch/csrc/jit/runtime/slice_indices_adjust.cpp",
"torch/csrc/jit/runtime/register_ops_utils.cpp",
"torch/csrc/jit/runtime/vararg_functions.cpp",
"torch/csrc/jit/serialization/import_read.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
libtorch_sources_common = core_sources_common
core_trainer_sources = [
"torch/csrc/autograd/anomaly_mode.cpp",
"torch/csrc/autograd/autograd.cpp",
"torch/csrc/autograd/cpp_hook.cpp",
"torch/csrc/autograd/custom_function.cpp",
"torch/csrc/autograd/engine.cpp",
"torch/csrc/autograd/function.cpp",
"torch/csrc/autograd/function_hook.cpp",
"torch/csrc/autograd/functions/accumulate_grad.cpp",
"torch/csrc/autograd/functions/basic_ops.cpp",
"torch/csrc/autograd/functions/tensor.cpp",
"torch/csrc/autograd/functions/utils.cpp",
"torch/csrc/autograd/input_buffer.cpp",
"torch/csrc/autograd/record_function_ops.cpp",
"torch/csrc/autograd/saved_variable.cpp",
"torch/csrc/autograd/variable.cpp",
"torch/csrc/jit/frontend/name_mangler.cpp",
"torch/csrc/jit/ir/type_hashing.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/type_name_uniquer.cpp",
]
core_sources_full_mobile = [
"torch/csrc/jit/api/function_impl.cpp",
"torch/csrc/jit/api/module.cpp",
"torch/csrc/jit/api/object.cpp",
"torch/csrc/jit/backends/backend_debug_handler.cpp",
"torch/csrc/jit/backends/backend_detail.cpp",
"torch/csrc/jit/backends/backend_interface.cpp",
"torch/csrc/jit/backends/backend_resolver.cpp",
"torch/csrc/jit/codegen/fuser/codegen.cpp",
"torch/csrc/jit/codegen/fuser/compiler.cpp",
"torch/csrc/jit/codegen/fuser/executor.cpp",
"torch/csrc/jit/codegen/fuser/fallback.cpp",
"torch/csrc/jit/codegen/fuser/interface.cpp",
"torch/csrc/jit/codegen/fuser/kernel_cache.cpp",
"torch/csrc/jit/frontend/builtin_functions.cpp",
"torch/csrc/jit/frontend/versioned_symbols.cpp",
"torch/csrc/jit/frontend/canonicalize_modified_loop.cpp",
"torch/csrc/jit/frontend/convert_to_ssa.cpp",
"torch/csrc/jit/frontend/exit_transforms.cpp",
"torch/csrc/jit/frontend/inline_loop_condition.cpp",
"torch/csrc/jit/frontend/ir_emitter.cpp",
"torch/csrc/jit/frontend/parser.cpp",
"torch/csrc/jit/frontend/schema_matching.cpp",
"torch/csrc/jit/frontend/script_type_parser.cpp",
"torch/csrc/jit/frontend/sugared_value.cpp",
"torch/csrc/jit/frontend/tracer.cpp",
"torch/csrc/jit/ir/alias_analysis.cpp",
"torch/csrc/jit/ir/attributes.cpp",
"torch/csrc/jit/ir/constants.cpp",
"torch/csrc/jit/ir/ir.cpp",
"torch/csrc/jit/ir/irparser.cpp",
"torch/csrc/jit/ir/node_hashing.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/ir/subgraph_matcher.cpp",
"torch/csrc/jit/jit_log.cpp",
"torch/csrc/jit/jit_opt_limit.cpp",
"torch/csrc/jit/passes/annotate_warns.cpp",
"torch/csrc/jit/passes/bailout_graph.cpp",
"torch/csrc/jit/passes/batch_mm.cpp",
"torch/csrc/jit/passes/canonicalize.cpp",
"torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp",
"torch/csrc/jit/passes/clear_profiling.cpp",
"torch/csrc/jit/passes/clear_undefinedness.cpp",
"torch/csrc/jit/passes/common_subexpression_elimination.cpp",
"torch/csrc/jit/passes/concat_opt.cpp",
"torch/csrc/jit/passes/constant_pooling.cpp",
"torch/csrc/jit/passes/constant_propagation.cpp",
"torch/csrc/jit/passes/create_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/dead_code_elimination.cpp",
"torch/csrc/jit/passes/remove_redundant_profiles.cpp",
"torch/csrc/jit/passes/remove_exceptions.cpp",
"torch/csrc/jit/passes/decompose_ops.cpp",
"torch/csrc/jit/passes/erase_number_types.cpp",
"torch/csrc/jit/passes/fixup_trace_scope_blocks.cpp",
"torch/csrc/jit/passes/freeze_module.cpp",
"torch/csrc/jit/passes/fuse_linear.cpp",
"torch/csrc/jit/passes/fuse_relu.cpp",
"torch/csrc/jit/passes/graph_fuser.cpp",
"torch/csrc/jit/passes/graph_rewrite_helper.cpp",
"torch/csrc/jit/passes/guard_elimination.cpp",
"torch/csrc/jit/passes/hoist_conv_packed_params.cpp",
"torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/inline_forked_closures.cpp",
"torch/csrc/jit/passes/inline_fork_wait.cpp",
"torch/csrc/jit/passes/inliner.cpp",
"torch/csrc/jit/passes/inplace_check.cpp",
"torch/csrc/jit/passes/insert_guards.cpp",
"torch/csrc/jit/passes/lift_closures.cpp",
"torch/csrc/jit/passes/liveness.cpp",
"torch/csrc/jit/passes/loop_unrolling.cpp",
"torch/csrc/jit/passes/lower_grad_of.cpp",
"torch/csrc/jit/passes/lower_tuples.cpp",
"torch/csrc/jit/passes/normalize_ops.cpp",
"torch/csrc/jit/passes/peephole_list_idioms.cpp",
"torch/csrc/jit/passes/peephole_alias_sensitive.cpp",
"torch/csrc/jit/passes/pass_manager.cpp",
"torch/csrc/jit/passes/peephole.cpp",
"torch/csrc/jit/passes/create_functional_graphs.cpp",
"torch/csrc/jit/passes/remove_mutation.cpp",
"torch/csrc/jit/passes/prepack_folding.cpp",
"torch/csrc/jit/passes/fold_conv_bn.cpp",
"torch/csrc/jit/passes/frozen_conv_add_relu_fusion.cpp",
"torch/csrc/jit/passes/frozen_conv_folding.cpp",
"torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp",
"torch/csrc/jit/passes/frozen_graph_optimizations.cpp",
"torch/csrc/jit/passes/remove_expands.cpp",
"torch/csrc/jit/passes/remove_dropout.cpp",
"torch/csrc/jit/passes/requires_grad_analysis.cpp",
"torch/csrc/jit/passes/shape_analysis.cpp",
"torch/csrc/jit/passes/specialize_autogradzero.cpp",
"torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp",
"torch/csrc/jit/passes/subgraph_rewrite.cpp",
"torch/csrc/jit/passes/tensorexpr_fuser.cpp",
"torch/csrc/jit/passes/utils/memory_dag.cpp",
"torch/csrc/jit/passes/utils/subgraph_utils.cpp",
"torch/csrc/jit/passes/xnnpack_rewrite.cpp",
"torch/csrc/jit/passes/vulkan_rewrite.cpp",
"torch/csrc/jit/passes/metal_rewrite.cpp",
"torch/csrc/jit/passes/quantization/helper.cpp",
"torch/csrc/jit/passes/quantization/quantization_type.cpp",
"torch/csrc/jit/passes/quantization/insert_observers.cpp",
"torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp",
"torch/csrc/jit/passes/quantization/dedup_module_uses.cpp",
"torch/csrc/jit/passes/quantization/finalize.cpp",
"torch/csrc/jit/passes/quantization/fusion_passes.cpp",
"torch/csrc/jit/python/update_graph_executor_opt.cpp",
"torch/csrc/jit/runtime/argument_spec.cpp",
"torch/csrc/jit/runtime/autodiff.cpp",
"torch/csrc/jit/runtime/graph_executor.cpp",
"torch/csrc/jit/runtime/interpreter/frame.cpp",
"torch/csrc/jit/runtime/interpreter/preprocess_graph.cpp",
"torch/csrc/jit/runtime/interpreter.cpp",
"torch/csrc/jit/runtime/logging.cpp",
"torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp",
"torch/csrc/jit/runtime/profiling_record.cpp",
"torch/csrc/jit/runtime/script_profile.cpp",
"torch/csrc/jit/runtime/symbolic_script.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/import.cpp",
"torch/csrc/jit/serialization/import_export_helpers.cpp",
"torch/csrc/jit/serialization/import_source.cpp",
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/python_print.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
"torch/csrc/jit/tensorexpr/block_codegen.cpp",
"torch/csrc/jit/tensorexpr/bounds_inference.cpp",
"torch/csrc/jit/tensorexpr/bounds_overlap.cpp",
"torch/csrc/jit/tensorexpr/codegen.cpp",
"torch/csrc/jit/tensorexpr/cpp_codegen.cpp",
"torch/csrc/jit/tensorexpr/eval.cpp",
"torch/csrc/jit/tensorexpr/expr.cpp",
"torch/csrc/jit/tensorexpr/external_functions_registry.cpp",
"torch/csrc/jit/tensorexpr/hash_provider.cpp",
"torch/csrc/jit/tensorexpr/intrinsic_symbols.cpp",
"torch/csrc/jit/tensorexpr/ir.cpp",
"torch/csrc/jit/tensorexpr/ir_mutator.cpp",
"torch/csrc/jit/tensorexpr/ir_printer.cpp",
"torch/csrc/jit/tensorexpr/ir_simplifier.cpp",
"torch/csrc/jit/tensorexpr/ir_verifier.cpp",
"torch/csrc/jit/tensorexpr/ir_visitor.cpp",
"torch/csrc/jit/tensorexpr/kernel.cpp",
"torch/csrc/jit/tensorexpr/llvm_codegen.cpp",
"torch/csrc/jit/tensorexpr/llvm_jit.cpp",
"torch/csrc/jit/tensorexpr/loopnest.cpp",
"torch/csrc/jit/tensorexpr/mem_arena.cpp",
"torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp",
"torch/csrc/jit/tensorexpr/operators/conv2d.cpp",
"torch/csrc/jit/tensorexpr/reduction.cpp",
"torch/csrc/jit/tensorexpr/registerizer.cpp",
"torch/csrc/jit/tensorexpr/tensor.cpp",
"torch/csrc/jit/tensorexpr/types.cpp",
"torch/csrc/jit/tensorexpr/unique_name_manager.cpp",
"torch/csrc/jit/testing/file_check.cpp",
"torch/csrc/jit/testing/hooks_for_testing.cpp",
"torch/csrc/utils/tensor_flatten.cpp",
"torch/csrc/utils/variadic.cpp",
]
core_sources_full = core_sources_full_mobile + [
"torch/csrc/jit/runtime/static/fusion.cpp",
"torch/csrc/jit/runtime/static/impl.cpp",
"torch/csrc/jit/runtime/static/ops.cpp",
"torch/csrc/jit/runtime/static/passes.cpp",
"torch/csrc/jit/tensorexpr/external_functions.cpp",
"torch/csrc/jit/tensorexpr/external_functions_codegen.cpp",
]
libtorch_core_sources = sorted(core_sources_common + core_sources_full + core_trainer_sources)
libtorch_distributed_sources = [
"torch/csrc/distributed/autograd/autograd.cpp",
"torch/csrc/distributed/autograd/utils.cpp",
"torch/csrc/distributed/autograd/context/container.cpp",
"torch/csrc/distributed/autograd/context/context.cpp",
"torch/csrc/distributed/autograd/engine/dist_engine.cpp",
"torch/csrc/distributed/autograd/functions/recvrpc_backward.cpp",
"torch/csrc/distributed/autograd/functions/sendrpc_backward.cpp",
"torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.cpp",
"torch/csrc/distributed/rpc/message.cpp",
"torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp",
"torch/csrc/distributed/rpc/profiler/server_process_global_profiler.cpp",
"torch/csrc/distributed/rpc/python_call.cpp",
"torch/csrc/distributed/rpc/python_remote_call.cpp",
"torch/csrc/distributed/rpc/python_resp.cpp",
"torch/csrc/distributed/rpc/request_callback.cpp",
"torch/csrc/distributed/rpc/request_callback_no_python.cpp",
"torch/csrc/distributed/rpc/rpc_agent.cpp",
"torch/csrc/distributed/rpc/rref_context.cpp",
"torch/csrc/distributed/rpc/rref_impl.cpp",
"torch/csrc/distributed/rpc/rref_proto.cpp",
"torch/csrc/distributed/rpc/script_call.cpp",
"torch/csrc/distributed/rpc/script_remote_call.cpp",
"torch/csrc/distributed/rpc/script_resp.cpp",
"torch/csrc/distributed/rpc/torchscript_functions.cpp",
"torch/csrc/distributed/rpc/types.cpp",
"torch/csrc/distributed/rpc/utils.cpp",
"torch/csrc/distributed/rpc/metrics/registry.cpp",
]
jit_sources_full = [
"torch/csrc/jit/codegen/cuda/interface.cpp",
"torch/csrc/jit/passes/lower_graph.cpp",
"torch/csrc/jit/runtime/register_c10_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
"torch/csrc/jit/passes/remove_inplace_ops.cpp",
"torch/csrc/jit/passes/utils/check_alias_annotation.cpp",
]
libtorch_core_jit_sources = sorted(jit_sources_full)
torch_mobile_core = [
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/model_compatibility.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
]
libtorch_lite_eager_symbolication = [
"torch/csrc/jit/frontend/source_range.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
# Later we can split the serialization and deserialization logic
# to have better separation within the build and only build the relevant parts.
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
# TODO: core_trainer_sources is not necessary for libtorch lite
libtorch_lite_cmake_sources = sorted(core_trainer_sources + core_sources_common + torch_mobile_core)
libtorch_cmake_sources = libtorch_core_sources + libtorch_core_jit_sources
libtorch_extra_sources = libtorch_core_jit_sources + [
"torch/csrc/autograd/TraceTypeManual.cpp",
"torch/csrc/autograd/VariableTypeManual.cpp",
"torch/csrc/autograd/FunctionsManual.cpp",
"torch/csrc/jit/api/module_save.cpp",
"torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp",
"torch/csrc/jit/mobile/backport.cpp",
"torch/csrc/jit/mobile/backport_manager.cpp",
# To be included for eager symbolication in lite interpreter
# when it is built in libtorch
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/import_data.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/model_compatibility.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/nnc/context.cpp",
"torch/csrc/jit/mobile/nnc/registry.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/train/export_data.cpp",
"torch/csrc/jit/mobile/train/optim/sgd.cpp",
"torch/csrc/jit/mobile/train/random.cpp",
"torch/csrc/jit/mobile/train/sequential.cpp",
"torch/csrc/jit/serialization/onnx.cpp",
"torch/csrc/jit/serialization/export.cpp",
"torch/csrc/jit/serialization/export_module.cpp",
"torch/csrc/jit/serialization/import_legacy.cpp",
"torch/csrc/utils/byte_order.cpp",
"torch/csrc/utils/out_types.cpp",
]
def libtorch_sources(gencode_pattern = ":generate-code[{}]"):
return libtorch_generated_sources(gencode_pattern) + libtorch_core_sources + libtorch_distributed_sources + libtorch_extra_sources
libtorch_cuda_core_sources = [
"torch/csrc/CudaIPCTypes.cpp",
"torch/csrc/cuda/comm.cpp",
"torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp",
"torch/csrc/autograd/profiler_cuda.cpp",
"torch/csrc/autograd/functions/comm.cpp",
"torch/csrc/jit/codegen/cuda/arith.cpp",
"torch/csrc/jit/codegen/cuda/compute_at.cpp",
"torch/csrc/jit/codegen/cuda/codegen.cpp",
"torch/csrc/jit/codegen/cuda/dispatch.cpp",
"torch/csrc/jit/codegen/cuda/expr_evaluator.cpp",
"torch/csrc/jit/codegen/cuda/executor.cpp",
"torch/csrc/jit/codegen/cuda/executor_kernel_arg.cpp",
"torch/csrc/jit/codegen/cuda/executor_launch_params.cpp",
"torch/csrc/jit/codegen/cuda/executor_utils.cpp",
"torch/csrc/jit/codegen/cuda/fusion.cpp",
"torch/csrc/jit/codegen/cuda/graph_fuser.cpp",
"torch/csrc/jit/codegen/cuda/index_compute.cpp",
"torch/csrc/jit/codegen/cuda/instrumentation.cpp",
"torch/csrc/jit/codegen/cuda/ir_base_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_cloner.cpp",
"torch/csrc/jit/codegen/cuda/ir_graphviz.cpp",
"torch/csrc/jit/codegen/cuda/ir_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_iostream.cpp",
"torch/csrc/jit/codegen/cuda/iter_visitor.cpp",
"torch/csrc/jit/codegen/cuda/kernel.cpp",
"torch/csrc/jit/codegen/cuda/kernel_cache.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_builder.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_printer.cpp",
"torch/csrc/jit/codegen/cuda/lower_index.cpp",
"torch/csrc/jit/codegen/cuda/lower_loops.cpp",
"torch/csrc/jit/codegen/cuda/lower_alias_memory.cpp",
"torch/csrc/jit/codegen/cuda/lower_insert_syncs.cpp",
"torch/csrc/jit/codegen/cuda/lower_unroll.cpp",
"torch/csrc/jit/codegen/cuda/lower_thread_predicate.cpp",
"torch/csrc/jit/codegen/cuda/lower_utils.cpp",
"torch/csrc/jit/codegen/cuda/lower_validation.cpp",
"torch/csrc/jit/codegen/cuda/lower2device.cpp",
"torch/csrc/jit/codegen/cuda/manager.cpp",
"torch/csrc/jit/codegen/cuda/mutator.cpp",
"torch/csrc/jit/codegen/cuda/parser.cpp",
"torch/csrc/jit/codegen/cuda/partition.cpp",
"torch/csrc/jit/codegen/cuda/predicate_compute.cpp",
"torch/csrc/jit/codegen/cuda/register_interface.cpp",
"torch/csrc/jit/codegen/cuda/scheduler.cpp",
"torch/csrc/jit/codegen/cuda/shape_inference.cpp",
"torch/csrc/jit/codegen/cuda/tensor_view.cpp",
"torch/csrc/jit/codegen/cuda/transform_iter.cpp",
"torch/csrc/jit/codegen/cuda/transform_replay.cpp",
"torch/csrc/jit/codegen/cuda/transform_rfactor.cpp",
"torch/csrc/jit/codegen/cuda/type.cpp",
"torch/csrc/jit/tensorexpr/cuda_codegen.cpp",
"torch/csrc/jit/runtime/register_cuda_ops.cpp",
]
libtorch_cuda_sources = libtorch_cuda_core_sources + [
"torch/csrc/cuda/nccl.cpp",
]
torch_cpp_srcs = [
"torch/csrc/api/src/cuda.cpp", # this just forwards stuff, no real CUDA
"torch/csrc/api/src/data/datasets/mnist.cpp",
"torch/csrc/api/src/data/samplers/distributed.cpp",
"torch/csrc/api/src/data/samplers/random.cpp",
"torch/csrc/api/src/data/samplers/sequential.cpp",
"torch/csrc/api/src/data/samplers/stream.cpp",
"torch/csrc/api/src/enum.cpp",
"torch/csrc/api/src/jit.cpp",
"torch/csrc/api/src/serialize.cpp",
"torch/csrc/api/src/nn/init.cpp",
"torch/csrc/api/src/nn/module.cpp",
"torch/csrc/api/src/nn/modules/_functions.cpp",
"torch/csrc/api/src/nn/modules/activation.cpp",
"torch/csrc/api/src/nn/modules/adaptive.cpp",
"torch/csrc/api/src/nn/modules/batchnorm.cpp",
"torch/csrc/api/src/nn/modules/normalization.cpp",
"torch/csrc/api/src/nn/modules/instancenorm.cpp",
"torch/csrc/api/src/nn/modules/conv.cpp",
"torch/csrc/api/src/nn/modules/dropout.cpp",
"torch/csrc/api/src/nn/modules/distance.cpp",
"torch/csrc/api/src/nn/modules/embedding.cpp",
"torch/csrc/api/src/nn/modules/fold.cpp",
"torch/csrc/api/src/nn/modules/linear.cpp",
"torch/csrc/api/src/nn/modules/loss.cpp",
"torch/csrc/api/src/nn/modules/padding.cpp",
"torch/csrc/api/src/nn/modules/pixelshuffle.cpp",
"torch/csrc/api/src/nn/modules/pooling.cpp",
"torch/csrc/api/src/nn/modules/rnn.cpp",
"torch/csrc/api/src/nn/modules/upsampling.cpp",
"torch/csrc/api/src/nn/modules/transformer.cpp",
"torch/csrc/api/src/nn/modules/container/functional.cpp",
"torch/csrc/api/src/nn/options/activation.cpp",
"torch/csrc/api/src/nn/options/adaptive.cpp",
"torch/csrc/api/src/nn/options/batchnorm.cpp",
"torch/csrc/api/src/nn/options/conv.cpp",
"torch/csrc/api/src/nn/options/dropout.cpp",
"torch/csrc/api/src/nn/options/instancenorm.cpp",
"torch/csrc/api/src/nn/options/linear.cpp",
"torch/csrc/api/src/nn/options/normalization.cpp",
"torch/csrc/api/src/nn/options/embedding.cpp",
"torch/csrc/api/src/nn/options/padding.cpp",
"torch/csrc/api/src/nn/options/pooling.cpp",
"torch/csrc/api/src/nn/options/rnn.cpp",
"torch/csrc/api/src/nn/options/vision.cpp",
"torch/csrc/api/src/nn/options/transformer.cpp",
"torch/csrc/api/src/optim/adagrad.cpp",
"torch/csrc/api/src/optim/adam.cpp",
"torch/csrc/api/src/optim/adamw.cpp",
"torch/csrc/api/src/optim/lbfgs.cpp",
"torch/csrc/api/src/optim/optimizer.cpp",
"torch/csrc/api/src/optim/rmsprop.cpp",
"torch/csrc/api/src/optim/serialize.cpp",
"torch/csrc/api/src/optim/sgd.cpp",
"torch/csrc/api/src/optim/schedulers/lr_scheduler.cpp",
"torch/csrc/api/src/optim/schedulers/step_lr.cpp",
"torch/csrc/api/src/serialize/input-archive.cpp",
"torch/csrc/api/src/serialize/output-archive.cpp",
]
libtorch_python_cuda_core_sources = [
"torch/csrc/cuda/Event.cpp",
"torch/csrc/cuda/Module.cpp",
"torch/csrc/cuda/python_comm.cpp",
"torch/csrc/cuda/Storage.cpp",
"torch/csrc/cuda/Stream.cpp",
"torch/csrc/cuda/Graph.cpp",
"torch/csrc/cuda/serialization.cpp",
"torch/csrc/cuda/shared/cudart.cpp",
"torch/csrc/cuda/shared/nvtx.cpp",
"torch/csrc/cuda/utils.cpp",
]
libtorch_python_cuda_sources = libtorch_python_cuda_core_sources + [
"torch/csrc/cuda/python_nccl.cpp",
"torch/csrc/cuda/shared/cudnn.cpp",
"torch/csrc/cuda/Tensor.cpp",
]
libtorch_python_core_sources = [
"torch/csrc/DataLoader.cpp",
"torch/csrc/Device.cpp",
"torch/csrc/Dtype.cpp",
"torch/csrc/DynamicTypes.cpp",
"torch/csrc/Exceptions.cpp",
"torch/csrc/Generator.cpp",
"torch/csrc/Layout.cpp",
"torch/csrc/MemoryFormat.cpp",
"torch/csrc/QScheme.cpp",
"torch/csrc/Module.cpp",
"torch/csrc/python_dimname.cpp",
"torch/csrc/Size.cpp",
"torch/csrc/Storage.cpp",
"torch/csrc/Stream.cpp",
"torch/csrc/TypeInfo.cpp",
"torch/csrc/api/src/python/init.cpp",
"torch/csrc/autograd/functions/init.cpp",
"torch/csrc/autograd/init.cpp",
"torch/csrc/autograd/python_anomaly_mode.cpp",
"torch/csrc/autograd/python_cpp_function.cpp",
"torch/csrc/autograd/python_engine.cpp",
"torch/csrc/autograd/python_function.cpp",
"torch/csrc/autograd/python_hook.cpp",
"torch/csrc/autograd/python_legacy_variable.cpp",
"torch/csrc/autograd/python_variable.cpp",
"torch/csrc/autograd/python_variable_indexing.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
"torch/csrc/jit/passes/onnx.cpp",
"torch/csrc/jit/passes/onnx/cast_all_constant_to_floating.cpp",
"torch/csrc/jit/passes/onnx/eval_peephole.cpp",
"torch/csrc/jit/passes/onnx/constant_fold.cpp",
"torch/csrc/jit/passes/onnx/constant_map.cpp",
"torch/csrc/jit/passes/onnx/eliminate_unused_items.cpp",
"torch/csrc/jit/passes/onnx/fixup_onnx_controlflow.cpp",
"torch/csrc/jit/passes/onnx/list_model_parameters.cpp",
"torch/csrc/jit/passes/onnx/function_substitution.cpp",
"torch/csrc/jit/passes/onnx/fold_if_node.cpp",
"torch/csrc/jit/passes/onnx/helper.cpp",
"torch/csrc/jit/passes/onnx/peephole.cpp",
"torch/csrc/jit/passes/onnx/preprocess_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/prepare_division_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp",
"torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp",
"torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/shape_type_inference.cpp",
"torch/csrc/jit/python/pybind_utils.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp",
"torch/csrc/jit/python/python_arg_flatten.cpp",
"torch/csrc/jit/python/python_custom_class.cpp",
"torch/csrc/jit/python/python_interpreter.cpp",
"torch/csrc/jit/python/python_ir.cpp",
"torch/csrc/jit/python/python_tracer.cpp",
"torch/csrc/jit/python/script_init.cpp",
"torch/csrc/jit/frontend/concrete_module_type.cpp",
"torch/csrc/jit/frontend/tree_views.cpp",
"torch/csrc/jit/python/python_sugared_value.cpp",
"torch/csrc/jit/python/python_tree_views.cpp",
"torch/csrc/jit/runtime/static/init.cpp",
"torch/csrc/fx/fx_init.cpp",
"torch/csrc/jit/tensorexpr/tensorexpr_init.cpp",
"torch/csrc/multiprocessing/init.cpp",
"torch/csrc/onnx/init.cpp",
"torch/csrc/serialization.cpp",
"torch/csrc/tensor/python_tensor.cpp",
"torch/csrc/utils/init.cpp",
"torch/csrc/utils/throughput_benchmark.cpp",
"torch/csrc/utils.cpp",
"torch/csrc/utils/cuda_lazy_init.cpp",
"torch/csrc/utils/invalid_arguments.cpp",
"torch/csrc/utils/crash_handler.cpp",
"torch/csrc/utils/object_ptr.cpp",
"torch/csrc/utils/python_arg_parser.cpp",
"torch/csrc/utils/python_dispatch.cpp",
"torch/csrc/utils/structseq.cpp",
"torch/csrc/utils/tensor_apply.cpp",
"torch/csrc/utils/tensor_dtypes.cpp",
"torch/csrc/utils/tensor_layouts.cpp",
"torch/csrc/utils/tensor_memoryformats.cpp",
"torch/csrc/utils/tensor_qschemes.cpp",
"torch/csrc/utils/tensor_list.cpp",
"torch/csrc/utils/tensor_new.cpp",
"torch/csrc/utils/tensor_numpy.cpp",
"torch/csrc/utils/tensor_types.cpp",
"torch/csrc/utils/disable_torch_function.cpp",
]
libtorch_python_distributed_core_sources = [
"torch/lib/c10d/comm.cpp",
"torch/lib/c10d/default_comm_hooks.cpp",
"torch/lib/c10d/frontend.cpp",
"torch/lib/c10d/reducer.cpp",
"torch/lib/c10d/logger.cpp",
"torch/csrc/distributed/c10d/python_comm_hook.cpp",
"torch/csrc/distributed/c10d/init.cpp",
]
libtorch_python_distributed_sources = libtorch_python_distributed_core_sources + [
"torch/csrc/distributed/autograd/init.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/init.cpp",
"torch/csrc/distributed/rpc/process_group_agent.cpp",
"torch/csrc/distributed/rpc/py_rref.cpp",
"torch/csrc/distributed/rpc/python_functions.cpp",
"torch/csrc/distributed/rpc/python_rpc_handler.cpp",
"torch/csrc/distributed/rpc/request_callback_impl.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_process_group_agent.cpp",
"torch/csrc/distributed/rpc/testing/init.cpp",
"torch/csrc/distributed/rpc/unpickled_python_call.cpp",
"torch/csrc/distributed/rpc/unpickled_python_remote_call.cpp",
"torch/csrc/jit/runtime/register_distributed_ops.cpp",
]
def glob_libtorch_python_sources(gencode_pattern = ":generate-code[{}]"):
_libtorch_python_sources = [gencode_pattern.format(name) for name in [
"autograd/generated/python_functions.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions.cpp",
"autograd/generated/python_variable_methods.cpp",
]]
_libtorch_python_sources.extend(libtorch_python_core_sources)
_libtorch_python_sources.extend(libtorch_python_distributed_sources)
return _libtorch_python_sources
aten_cpu_source_non_codegen_list = [
"aten/src/ATen/BatchedTensorImpl.cpp",
"aten/src/ATen/CPUGeneratorImpl.cpp",
"aten/src/ATen/Context.cpp",
"aten/src/ATen/DLConvertor.cpp",
"aten/src/ATen/ExpandUtils.cpp",
"aten/src/ATen/MemoryOverlap.cpp",
"aten/src/ATen/NamedTensorUtils.cpp",
"aten/src/ATen/ParallelCommon.cpp",
"aten/src/ATen/ParallelNative.cpp",
"aten/src/ATen/ParallelNativeTBB.cpp",
"aten/src/ATen/ParallelOpenMP.cpp",
"aten/src/ATen/ParallelThreadPoolNative.cpp",
"aten/src/ATen/ScalarOps.cpp",
"aten/src/ATen/SequenceNumber.cpp",
"aten/src/ATen/SparseTensorImpl.cpp",
"aten/src/ATen/SparseCsrTensorImpl.cpp",
"aten/src/ATen/SparseTensorUtils.cpp",
"aten/src/ATen/TensorGeometry.cpp",
"aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorMeta.cpp",
"aten/src/ATen/TensorNames.cpp",
"aten/src/ATen/TensorUtils.cpp",
"aten/src/ATen/ThreadLocalState.cpp",
"aten/src/ATen/Utils.cpp",
"aten/src/ATen/Version.cpp",
"aten/src/ATen/VmapMode.cpp",
"aten/src/ATen/VmapTransforms.cpp",
"aten/src/ATen/core/BackendSelectFallbackKernel.cpp",
"aten/src/ATen/core/DeprecatedTypeProperties.cpp",
"aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp",
"aten/src/ATen/core/Dict.cpp",
"aten/src/ATen/core/Dimname.cpp",
"aten/src/ATen/core/Formatting.cpp",
"aten/src/ATen/core/Generator.cpp",
"aten/src/ATen/core/List.cpp",
"aten/src/ATen/core/NamedTensor.cpp",
"aten/src/ATen/core/Tensor.cpp",
"aten/src/ATen/core/VariableFallbackKernel.cpp",
"aten/src/ATen/core/VariableHooksInterface.cpp",
"aten/src/ATen/core/Vitals.cpp",
"aten/src/ATen/core/boxing/KernelFunction.cpp",
"aten/src/ATen/core/custom_class.cpp",
"aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp",
"aten/src/ATen/core/dispatch/Dispatcher.cpp",
"aten/src/ATen/core/dispatch/ObservedOperators.cpp",
"aten/src/ATen/core/dispatch/OperatorEntry.cpp",
"aten/src/ATen/core/interned_strings.cpp",
"aten/src/ATen/core/ivalue.cpp",
"aten/src/ATen/core/library.cpp",
"aten/src/ATen/core/op_registration/infer_schema.cpp",
"aten/src/ATen/core/op_registration/op_registration.cpp",
"aten/src/ATen/core/operator_name.cpp",
"aten/src/ATen/core/register_symbols.cpp",
"aten/src/ATen/core/type.cpp",
"aten/src/ATen/cpu/FlushDenormal.cpp",
"aten/src/ATen/detail/CPUGuardImpl.cpp",
"aten/src/ATen/detail/CUDAHooksInterface.cpp",
"aten/src/ATen/detail/HIPHooksInterface.cpp",
"aten/src/ATen/metal/Context.cpp",
"aten/src/ATen/native/AutogradComposite.cpp",
"aten/src/ATen/native/BatchLinearAlgebraKernel.cpp",
"aten/src/ATen/native/DispatchStub.cpp",
"aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/mkl/LinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SparseCsrLinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SpectralOps.cpp",
"aten/src/ATen/native/mkldnn/BinaryOps.cpp",
"aten/src/ATen/native/mkldnn/Conv.cpp",
"aten/src/ATen/native/mkldnn/Copy.cpp",
"aten/src/ATen/native/mkldnn/IDeepRegistration.cpp",
"aten/src/ATen/native/mkldnn/Linear.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp",
"aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp",
"aten/src/ATen/native/mkldnn/Normalization.cpp",
"aten/src/ATen/native/mkldnn/Pooling.cpp",
"aten/src/ATen/native/mkldnn/Relu.cpp",
"aten/src/ATen/native/mkldnn/SoftMax.cpp",
"aten/src/ATen/native/mkldnn/TensorFactories.cpp",
"aten/src/ATen/native/mkldnn/TensorShape.cpp",
"aten/src/ATen/native/mkldnn/UnaryOps.cpp",
"aten/src/ATen/native/mkldnn/Utils.cpp",
"aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/record_function.cpp",
"aten/src/ATen/vulkan/Context.cpp",
]
aten_cpu_source_codegen_list = [
"aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp",
]
# When building the lite interpreter in OSS, "aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp" will go through
# the codegen process. The codegen version of this file, like Activation.cpp.DEFAULT.cpp, will be included
# in ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt. As a result, in aten/src/ATen/CMakeLists.txt,
# only aten_cpu_source_non_codegen_list needs to be added to ${all_cpu_cpp}.
aten_cpu_source_list = sorted(aten_cpu_source_non_codegen_list + aten_cpu_source_codegen_list)
# Same as ${aten_cpu_source_codegen_list}, this list will go through aten codegen, and be included in
# ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt.
aten_native_source_codegen_list = [
"aten/src/ATen/native/cpu/Activation.cpp",
"aten/src/ATen/native/cpu/BinaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/BlasKernel.cpp",
"aten/src/ATen/native/cpu/CatKernel.cpp",
"aten/src/ATen/native/cpu/ComplexKernel.cpp",
"aten/src/ATen/native/cpu/CopyKernel.cpp",
"aten/src/ATen/native/cpu/CrossKernel.cpp",
"aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp",
"aten/src/ATen/native/cpu/DistanceOpsKernel.cpp",
"aten/src/ATen/native/cpu/FillKernel.cpp",
"aten/src/ATen/native/cpu/FunctionOfAMatrixUtilsKernel.cpp",
"aten/src/ATen/native/cpu/GridSamplerKernel.cpp",
"aten/src/ATen/native/cpu/IndexKernel.cpp",
"aten/src/ATen/native/cpu/LerpKernel.cpp",
"aten/src/ATen/native/cpu/LinearAlgebraKernel.cpp",
"aten/src/ATen/native/cpu/MaxPooling.cpp",
"aten/src/ATen/native/cpu/MaxPoolKernel.cpp",
"aten/src/ATen/native/cpu/MultinomialKernel.cpp",
"aten/src/ATen/native/cpu/PointwiseOpsKernel.cpp",
"aten/src/ATen/native/cpu/PowKernel.cpp",
"aten/src/ATen/native/cpu/RangeFactoriesKernel.cpp",
"aten/src/ATen/native/cpu/ReduceAllOpsKernel.cpp",
"aten/src/ATen/native/cpu/ReduceOpsKernel.cpp",
"aten/src/ATen/native/cpu/ScatterGatherKernel.cpp",
"aten/src/ATen/native/cpu/SoftMaxKernel.cpp",
"aten/src/ATen/native/cpu/SortingKernel.cpp",
"aten/src/ATen/native/cpu/StackKernel.cpp",
"aten/src/ATen/native/cpu/SumKernel.cpp",
"aten/src/ATen/native/cpu/TensorCompareKernel.cpp",
"aten/src/ATen/native/cpu/UnaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/Unfold2d.cpp",
"aten/src/ATen/native/cpu/UnfoldBackwardKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp",
"aten/src/ATen/native/cpu/batch_norm_kernel.cpp",
"aten/src/ATen/native/cpu/group_norm_kernel.cpp",
"aten/src/ATen/native/cpu/layer_norm_kernel.cpp",
"aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp",
]
# This aten native source file list will not go through the aten codegen process
aten_native_source_non_codegen_list = [
"aten/src/ATen/native/ao_sparse/library.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp",
"aten/src/ATen/native/quantized/cpu/make_per_tensor_quantized_tensor.cpp",
"aten/src/ATen/native/quantized/cpu/q_adaavgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool3d.cpp",
"aten/src/ATen/native/quantized/cpu/qadd.cpp",
"aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp",
"aten/src/ATen/native/quantized/cpu/qchannel_shuffle.cpp",
"aten/src/ATen/native/quantized/cpu/qclamp.cpp",
"aten/src/ATen/native/quantized/cpu/qconcat.cpp",
"aten/src/ATen/native/quantized/cpu/qconv.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qelu.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qhardswish.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qmul.cpp",
"aten/src/ATen/native/quantized/cpu/qnormalization.cpp",
"aten/src/ATen/native/quantized/cpu/qpool.cpp",
"aten/src/ATen/native/quantized/cpu/qreduction.cpp",
"aten/src/ATen/native/quantized/cpu/qrelu.cpp",
"aten/src/ATen/native/quantized/cpu/qsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qsort.cpp",
"aten/src/ATen/native/quantized/cpu/qtanh.cpp",
"aten/src/ATen/native/quantized/cpu/qthreshold.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_bilinear2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest3d.cpp",
"aten/src/ATen/native/quantized/cpu/tensor_operators.cpp",
"aten/src/ATen/native/quantized/Copy.cpp",
"aten/src/ATen/native/quantized/QTensor.cpp",
"aten/src/ATen/native/quantized/TensorCompare.cpp",
"aten/src/ATen/native/quantized/TensorFactories.cpp",
"aten/src/ATen/native/quantized/affine_quantizer.cpp",
"aten/src/ATen/native/quantized/affine_quantizer_base.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_channel_affine.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp",
"aten/src/ATen/native/quantized/library.cpp",
"aten/src/ATen/quantized/QTensorImpl.cpp",
"aten/src/ATen/quantized/Quantizer.cpp",
"aten/src/ATen/native/Activation.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling3d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling2d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling3d.cpp",
"aten/src/ATen/native/AffineGridGenerator.cpp",
"aten/src/ATen/native/AveragePool2d.cpp",
"aten/src/ATen/native/AveragePool3d.cpp",
"aten/src/ATen/native/BatchLinearAlgebra.cpp",
"aten/src/ATen/native/Batching.cpp",
"aten/src/ATen/native/BinaryOps.cpp",
"aten/src/ATen/native/Blas.cpp",
"aten/src/ATen/native/BlasKernel.cpp",
"aten/src/ATen/native/Bucketization.cpp",
"aten/src/ATen/native/CPUBlas.cpp",
"aten/src/ATen/native/ChanelShuffle.cpp",
"aten/src/ATen/native/Col2Im.cpp",
"aten/src/ATen/native/ConstantPadNd.cpp",
"aten/src/ATen/native/Convolution.cpp",
"aten/src/ATen/native/ConvolutionMM2d.cpp",
"aten/src/ATen/native/ConvolutionMM3d.cpp",
"aten/src/ATen/native/ConvolutionTBC.cpp",
"aten/src/ATen/native/Copy.cpp",
"aten/src/ATen/native/Cross.cpp",
"aten/src/ATen/native/DilatedMaxPool2d.cpp",
"aten/src/ATen/native/DilatedMaxPool3d.cpp",
# Referenced by both native and ATen/Version.cpp. Does not reference other native symbols
# "aten/src/ATen/native/DispatchStub.cpp",
# "aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/native/Distance.cpp",
"aten/src/ATen/native/Distributions.cpp",
"aten/src/ATen/native/Dropout.cpp",
"aten/src/ATen/native/Embedding.cpp",
"aten/src/ATen/native/EmbeddingBag.cpp",
"aten/src/ATen/native/Fill.cpp",
"aten/src/ATen/native/ForeachOpsKernels.cpp",
"aten/src/ATen/native/FractionalMaxPool2d.cpp",
"aten/src/ATen/native/FractionalMaxPool3d.cpp",
"aten/src/ATen/native/FunctionOfAMatrixUtils.cpp",
"aten/src/ATen/native/GatedLinearUnit.cpp",
"aten/src/ATen/native/GridSampler.cpp",
"aten/src/ATen/native/Im2Col.cpp",
"aten/src/ATen/native/IndexingUtils.cpp",
"aten/src/ATen/native/Integration.cpp",
"aten/src/ATen/native/Itertools.cpp",
"aten/src/ATen/native/LegacyBridge.cpp",
"aten/src/ATen/native/LegacyNNDefinitions.cpp",
"aten/src/ATen/native/Lerp.cpp",
"aten/src/ATen/native/Linear.cpp",
"aten/src/ATen/native/LinearAlgebra.cpp",
"aten/src/ATen/native/Loss.cpp",
"aten/src/ATen/native/LossCTC.cpp",
"aten/src/ATen/native/LossMultiLabelMargin.cpp",
"aten/src/ATen/native/LossMultiMargin.cpp",
"aten/src/ATen/native/LossNLL.cpp",
"aten/src/ATen/native/LossNLL2d.cpp",
"aten/src/ATen/native/MaxPooling.cpp",
"aten/src/ATen/native/MaxUnpooling.cpp",
"aten/src/ATen/native/Memory.cpp",
"aten/src/ATen/native/MetaTensor.cpp",
"aten/src/ATen/native/NNPACK.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp",
"aten/src/ATen/native/NaiveDilatedConvolution.cpp",
"aten/src/ATen/native/NamedTensor.cpp",
"aten/src/ATen/native/Normalization.cpp",
"aten/src/ATen/native/Onehot.cpp",
"aten/src/ATen/native/PackedSequence.cpp",
"aten/src/ATen/native/PixelShuffle.cpp",
"aten/src/ATen/native/PointwiseOps.cpp",
"aten/src/ATen/native/Pooling.cpp",
"aten/src/ATen/native/Pow.cpp",
"aten/src/ATen/native/QuantizedLinear.cpp",
"aten/src/ATen/native/RNN.cpp",
"aten/src/ATen/native/RangeFactories.cpp",
"aten/src/ATen/native/ReduceAllOps.cpp",
"aten/src/ATen/native/ReduceOps.cpp",
"aten/src/ATen/native/ReflectionPad.cpp",
"aten/src/ATen/native/Repeat.cpp",
"aten/src/ATen/native/ReplicationPadding.cpp",
"aten/src/ATen/native/Resize.cpp",
"aten/src/ATen/native/RowwisePrune.cpp",
"aten/src/ATen/native/SegmentReduce.cpp",
"aten/src/ATen/native/Scalar.cpp",
"aten/src/ATen/native/SobolEngineOps.cpp",
"aten/src/ATen/native/SobolEngineOpsUtils.cpp",
"aten/src/ATen/native/SoftMax.cpp",
"aten/src/ATen/native/Sorting.cpp",
"aten/src/ATen/native/SpectralOps.cpp",
"aten/src/ATen/native/SummaryOps.cpp",
"aten/src/ATen/native/TensorAdvancedIndexing.cpp",
"aten/src/ATen/native/TensorCompare.cpp",
"aten/src/ATen/native/TensorConversions.cpp",
"aten/src/ATen/native/TensorFactories.cpp",
"aten/src/ATen/native/TensorIteratorReduce.cpp",
"aten/src/ATen/native/TensorProperties.cpp",
"aten/src/ATen/native/TensorShape.cpp",
"aten/src/ATen/native/TensorTransformations.cpp",
"aten/src/ATen/native/TestOps.cpp",
"aten/src/ATen/native/TriangularOps.cpp",
"aten/src/ATen/native/TypeProperties.cpp",
"aten/src/ATen/native/UnaryOps.cpp",
"aten/src/ATen/native/Unfold2d.cpp",
"aten/src/ATen/native/Unfold3d.cpp",
"aten/src/ATen/native/UnfoldBackward.cpp",
"aten/src/ATen/native/Unique.cpp",
# Low-level functions that can be directly referenced
# "aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/UpSampleBicubic2d.cpp",
"aten/src/ATen/native/UpSampleBilinear2d.cpp",
"aten/src/ATen/native/UpSampleLinear1d.cpp",
"aten/src/ATen/native/UpSampleNearest1d.cpp",
"aten/src/ATen/native/UpSampleNearest2d.cpp",
"aten/src/ATen/native/UpSampleNearest3d.cpp",
"aten/src/ATen/native/UpSampleTrilinear3d.cpp",
"aten/src/ATen/native/VariableMethodStubs.cpp",
"aten/src/ATen/native/WeightNorm.cpp",
"aten/src/ATen/native/group_norm.cpp",
"aten/src/ATen/native/layer_norm.cpp",
"aten/src/ATen/native/sparse/ParamUtils.cpp",
"aten/src/ATen/native/sparse/SoftMax.cpp",
"aten/src/ATen/native/sparse/SparseMatMul.cpp",
"aten/src/ATen/native/sparse/SparseTensor.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensor.cpp",
"aten/src/ATen/native/sparse/SparseTensorMath.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp",
"aten/src/TH/THAllocator.cpp",
"aten/src/TH/THBlas.cpp",
"aten/src/TH/THGeneral.cpp",
"aten/src/TH/THLapack.cpp",
"aten/src/TH/THStorageFunctions.cpp",
"aten/src/TH/THTensor.cpp",
"aten/src/TH/THTensorEvenMoreMath.cpp",
"aten/src/TH/THTensorLapack.cpp",
"aten/src/TH/THTensorMath.cpp",
"aten/src/TH/THTensorMoreMath.cpp",
"aten/src/ATen/native/utils/Factory.cpp",
"aten/src/ATen/native/xnnpack/Activation.cpp",
"aten/src/ATen/native/xnnpack/ChannelShuffle.cpp",
"aten/src/ATen/native/xnnpack/Convolution.cpp",
"aten/src/ATen/native/xnnpack/AveragePooling.cpp",
"aten/src/ATen/native/xnnpack/Init.cpp",
"aten/src/ATen/native/xnnpack/Linear.cpp",
"aten/src/ATen/native/xnnpack/MaxPooling.cpp",
"aten/src/ATen/native/xnnpack/OpContext.cpp",
"aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp",
"aten/src/ATen/native/xnnpack/Shim.cpp",
# Files not in native, but depend on native symbols
# "aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorIterator.cpp",
"aten/src/ATen/LegacyTHFunctionsCPU.cpp",
"aten/src/ATen/nnapi/nnapi_bind.cpp",
"aten/src/ATen/nnapi/nnapi_wrapper.cpp",
"aten/src/ATen/nnapi/nnapi_model_loader.cpp",
]
# 1. Files in ATen/native with a few exceptions
# TODO: move the exceptions to proper locations
# 2. The whole aten native source list combines the lists that do and do not go through the aten codegen process.
aten_native_source_list = sorted(aten_native_source_non_codegen_list + aten_native_source_codegen_list)
|
py | b406aa5f145e93a7460b63ed71297b6b76a3b54f | from __future__ import absolute_import
import base64
from kombu.serialization import registry, encode, decode
from ..exceptions import SecurityError
from ..utils.encoding import bytes_to_str, str_to_bytes
from .certificate import Certificate, FSCertStore
from .key import PrivateKey
def b64encode(s):
return bytes_to_str(base64.b64encode(str_to_bytes(s)))
def b64decode(s):
return base64.b64decode(str_to_bytes(s))
class SecureSerializer(object):
def __init__(self, key=None, cert=None, cert_store=None,
digest="sha1", serializer="json"):
self._key = key
self._cert = cert
self._cert_store = cert_store
self._digest = digest
self._serializer = serializer
def serialize(self, data):
"""serialize data structure into string"""
assert self._key is not None
assert self._cert is not None
try:
content_type, content_encoding, body = encode(
data, serializer=self._serializer)
# What we sign is the serialized body, not the body itself.
# this way the receiver doesn't have to decode the contents
# to verify the signature (and thus avoiding potential flaws
# in the decoding step).
return self._pack(body, content_type, content_encoding,
signature=self._key.sign(body, self._digest),
signer=self._cert.get_id())
except Exception as exc:
raise SecurityError("Unable to serialize: %r" % (exc, ))
def deserialize(self, data):
"""deserialize data structure from string"""
assert self._cert_store is not None
try:
payload = self._unpack(data)
signature, signer, body = (payload["signature"],
payload["signer"],
payload["body"])
self._cert_store[signer].verify(body,
signature, self._digest)
except Exception as exc:
raise SecurityError("Unable to deserialize: %r" % (exc, ))
return decode(body, payload["content_type"],
payload["content_encoding"], force=True)
def _pack(self, body, content_type, content_encoding, signer, signature,
sep='\x00\x01'):
return b64encode(sep.join([signer, signature,
content_type, content_encoding, body]))
def _unpack(self, payload, sep='\x00\x01',
fields=("signer", "signature", "content_type",
"content_encoding", "body")):
return dict(zip(fields, b64decode(payload).split(sep)))
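# Sketch of the wire format handled by _pack/_unpack (field values below are
# placeholders): the five fields are joined with the '\x00\x01' separator and
# base64-encoded, e.g.
#   b64encode("signer\x00\x01signature\x00\x01application/json\x00\x01utf-8\x00\x01body")
# and _unpack() reverses this by b64-decoding and splitting on the separator.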
def register_auth(key=None, cert=None, store=None, digest="sha1",
serializer="json"):
"""register security serializer"""
s = SecureSerializer(key and PrivateKey(key),
cert and Certificate(cert),
store and FSCertStore(store),
digest=digest, serializer=serializer)
registry.register("auth", s.serialize, s.deserialize,
content_type="application/data",
content_encoding="utf-8")
|
py | b406aad2d93338ea3d6ec7471165c0e795b6bb8a | # Import package
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
FilePath = '../Images and Videos/theroad.mp4'
cap = cv2.VideoCapture(FilePath)
w = cap.get(3)  # frame width (CAP_PROP_FRAME_WIDTH)
h = cap.get(4)  # frame height (CAP_PROP_FRAME_HEIGHT)
def callback(x):
pass
cv2.namedWindow('cor')
cv2.resizeWindow('cor', 700, 1000)
cv2.createTrackbar('lowh', 'cor', 0, 180, callback)
cv2.createTrackbar('highh', 'cor', 0, 180, callback)
cv2.createTrackbar('lows', 'cor', 0, 255, callback)
cv2.createTrackbar('highs', 'cor', 0, 255, callback)
cv2.createTrackbar('lowv', 'cor', 0, 255, callback)
cv2.createTrackbar('highv', 'cor', 0, 255, callback)
cv2.setTrackbarPos('lowh', 'cor',0)
cv2.setTrackbarPos('lows', 'cor', 0)
cv2.setTrackbarPos('lowv', 'cor', 130)
cv2.setTrackbarPos('highh', 'cor', 255)
cv2.setTrackbarPos('highs', 'cor', 255)
cv2.setTrackbarPos('highv', 'cor', 255)
cv2.createTrackbar('minline', 'cor', 0, 500, callback)
cv2.createTrackbar('maxgap', 'cor', 0, 500, callback)
cv2.setTrackbarPos('minline', 'cor', 10)
cv2.setTrackbarPos('maxgap', 'cor', 20)
cv2.createTrackbar('rad', 'cor', 0, 1800, callback)
cv2.createTrackbar('rad2', 'cor', 0, 1800, callback)
cv2.createTrackbar('width', 'cor', 0, 1800, callback)
cv2.setTrackbarPos('rad', 'cor',958)
cv2.setTrackbarPos('rad2', 'cor', 477)
cv2.setTrackbarPos('width', 'cor', 520)
cv2.createTrackbar('centerX', 'cor', 0, 1500, callback)
cv2.createTrackbar('centerY', 'cor', 0, 1500, callback)
cv2.setTrackbarPos('centerX', 'cor', 640)
cv2.setTrackbarPos('centerY', 'cor', 640)
cv2.createTrackbar('alpha', 'cor', 0,100, callback)
cv2.createTrackbar('beta', 'cor', 0, 100, callback)
cv2.setTrackbarPos('alpha', 'cor', 80)
cv2.setTrackbarPos('beta', 'cor', 100)
cv2.namedWindow('Perspective_transform')
cv2.resizeWindow('Perspective_transform', 700, 1000)
# 1280 -> width and 720 -> Height
cv2.createTrackbar('src_x1', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('src_y1', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('src_x2', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('src_y2', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('src_x3', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('src_y3', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('src_x4', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('src_y4', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('dist_x1', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('dist_y1', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('dist_x2', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('dist_y2', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('dist_x3', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('dist_y3', 'Perspective_transform', 0, 720, callback)
cv2.createTrackbar('dist_x4', 'Perspective_transform', 0, 1280, callback)
cv2.createTrackbar('dist_y4', 'Perspective_transform', 0, 720, callback)
cv2.setTrackbarPos('src_x1', 'Perspective_transform', 523)
cv2.setTrackbarPos('src_y1', 'Perspective_transform', 453)
cv2.setTrackbarPos('src_x2', 'Perspective_transform', 811)
cv2.setTrackbarPos('src_y2', 'Perspective_transform', 440)
cv2.setTrackbarPos('src_x3', 'Perspective_transform', 405)
cv2.setTrackbarPos('src_y3', 'Perspective_transform', 639)
cv2.setTrackbarPos('src_x4', 'Perspective_transform', 1261)
cv2.setTrackbarPos('src_y4', 'Perspective_transform', 671)
cv2.setTrackbarPos('dist_x1', 'Perspective_transform', 160)
cv2.setTrackbarPos('dist_y1', 'Perspective_transform', 93)
cv2.setTrackbarPos('dist_x2', 'Perspective_transform', 1200)
cv2.setTrackbarPos('dist_y2', 'Perspective_transform', 0)
cv2.setTrackbarPos('dist_x3', 'Perspective_transform', 200)
cv2.setTrackbarPos('dist_y3', 'Perspective_transform', 710)
cv2.setTrackbarPos('dist_x4', 'Perspective_transform', 1200)
cv2.setTrackbarPos('dist_y4', 'Perspective_transform', 710)
def automatic_canny(images, sigma=0.33):
median = np.median(images)
# Lower/upper Canny thresholds derived from the median pixel intensity
lower = int(max(0, (1-sigma)*median))
upper = int(min(255, (1+sigma)*median))
edge = cv2.Canny(images, lower, upper,3)
return edge
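# Worked example of the heuristic above: with sigma=0.33 and a median pixel
# value of 120, lower = int(0.67 * 120) = 80 and upper = int(1.33 * 120) = 159.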
def perspectiveWarp(inpImage):
# Get image size
img_size = (inpImage.shape[1], inpImage.shape[0])
src_x1 = cv2.getTrackbarPos('src_x1','Perspective_transform')
src_y1 = cv2.getTrackbarPos('src_y1','Perspective_transform')
src_x2 = cv2.getTrackbarPos('src_x2','Perspective_transform')
src_y2 = cv2.getTrackbarPos('src_y2','Perspective_transform')
src_x3 = cv2.getTrackbarPos('src_x3','Perspective_transform')
src_y3 = cv2.getTrackbarPos('src_y3','Perspective_transform')
src_x4 = cv2.getTrackbarPos('src_x4','Perspective_transform')
src_y4 = cv2.getTrackbarPos('src_y4','Perspective_transform')
dist_x1 = cv2.getTrackbarPos('dist_x1','Perspective_transform')
dist_y1 = cv2.getTrackbarPos('dist_y1','Perspective_transform')
dist_x2 = cv2.getTrackbarPos('dist_x2','Perspective_transform')
dist_y2 = cv2.getTrackbarPos('dist_y2','Perspective_transform')
dist_x3 = cv2.getTrackbarPos('dist_x3','Perspective_transform')
dist_y3 = cv2.getTrackbarPos('dist_y3','Perspective_transform')
dist_x4 = cv2.getTrackbarPos('dist_x4','Perspective_transform')
dist_y4 = cv2.getTrackbarPos('dist_y4','Perspective_transform')
# Perspective points to be warped
src = np.float32([[src_x1,src_y1],
[src_x2, src_y2],
[src_x3, src_y3],
[src_x4, src_y4]])
# Window to be shown
dst = np.float32([[dist_x1,dist_y1],
[dist_x2,dist_y2],
[dist_x3,dist_y3],
[dist_x4,dist_y4]])
# Matrix to warp the image for birdseye window
matrix = cv2.getPerspectiveTransform(src, dst)
# Inverse matrix to unwarp the image for final window
minv = cv2.getPerspectiveTransform(dst, src)
birdseye = cv2.warpPerspective(inpImage, matrix, img_size)
# Get the birdseye window dimensions
height, width = birdseye.shape[:2]
# Divide the birdseye view into 2 halves to separate left & right lanes
birdseyeLeft = birdseye[0:height, 0:width // 2]
birdseyeRight = birdseye[0:height, width // 2:width]
return birdseye, birdseyeLeft, birdseyeRight, minv
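# Note: the split above simply halves the warped frame, so for a 1280-pixel-wide
# input birdseyeLeft covers columns 0..639 and birdseyeRight columns 640..1279.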
while True:
_, img = cap.read()
# Masking
lowh = cv2.getTrackbarPos('lowh','cor')
lows = cv2.getTrackbarPos('lows','cor')
lowv = cv2.getTrackbarPos('lowv','cor')
highh = cv2.getTrackbarPos('highh','cor')
highs = cv2.getTrackbarPos('highs','cor')
highv = cv2.getTrackbarPos('highv','cor')
# For ellipse (center coordinates)
centerX = cv2.getTrackbarPos('centerX','cor')
centerY = cv2.getTrackbarPos('centerY','cor')
# addWeighted parameters
alpha = cv2.getTrackbarPos('alpha','cor')
beta = cv2.getTrackbarPos('beta','cor')
# Hide corner using ellipse
rad = cv2.getTrackbarPos('rad', 'cor')
rad2 = cv2.getTrackbarPos('rad2', 'cor')
width = cv2.getTrackbarPos('width', 'cor')
# Define the range of white color in HSV (change the values for another color using the trackbars)
lower_red = np.array([lowh,lows,lowv])
upper_red = np.array([highh,highs,highv])
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('hsv',hsv)
# Threshold the HSV image to keep only the selected color range
mask = cv2.inRange(hsv, lower_red, upper_red)
cv2.imshow('mask',mask)
# Draw an ellipse to hide (black out) the upper region so only the road part is kept
cv2.ellipse(mask, (640,640), (rad, rad2), 0, 0, 360, (0, 0, 0), width)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img , img, mask = mask)
cv2.imshow('res',res)
# Grayscale
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray',gray)
# Gaussian Blur (Remove noise)
gray_blur = cv2.GaussianBlur(gray,(3, 3), 0)
# Canny edge
edges = automatic_canny(gray_blur)
cv2.imshow('Canny edge', edges)
# Thresholding (binary image)
ret, thresh = cv2.threshold(edges,125, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('thresold',thresh)
# Define kernel size
kernel = np.ones((10,10), np.uint8)
# Apply closing
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
cv2.imshow('closing', closing)
# Parameters for the probabilistic Hough transform
rho = 1
theta = np.pi/180
threshold = 50
min_line_len = cv2.getTrackbarPos('minline', 'cor')
max_line_gap = cv2.getTrackbarPos('maxgap', 'cor')
lines = cv2.HoughLinesP(closing, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((closing.shape[0], closing.shape[1], 3), dtype=np.uint8)
if lines is not None:
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(line_img, (x1, y1), (x2, y2), [0,0,255],3)
# Merge the image with the lines onto the original.
# img = img * α + line_img * β + γ
# NOTE: img and line_img must be the same shape!
alpha = alpha / 100 if alpha > 0 else 0.01
beta = beta / 100 if beta > 0 else 0.01
img = cv2.addWeighted(img, alpha, line_img, beta, 0.0)
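# For example, with the default trackbar values alpha=80 and beta=100 the
# blend above is roughly 0.8 * img + 1.0 * line_img.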
birdView, birdViewL, birdViewR, minverse = perspectiveWarp(img)
cv2.imshow('birdView',birdView)
cv2.imshow('birdViewL',birdViewL)
cv2.imshow('birdViewR',birdViewR)
cv2.imshow('line_img',line_img)
'''
# Apply contour to get the bounding box on the lane
contours, hierarchy=cv2.findContours(closing,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for i in contours:
area = cv2.contourArea(i)
if(area>10000):
x,y,w,h = cv2.boundingRect(i)
rect = cv2.minAreaRect(i)
box = cv2.boxPoints(rect)
box = np.int0(box)
#cv2.drawContours(img,[box],0,(255,0,0),4)
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),4)
cv2.putText(img,"Lane detected",(x,y),cv2.FONT_HERSHEY_SIMPLEX,4, (0,255,0),cv2.LINE_AA)
'''
cv2.imshow('Output', img)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
py | b406ac0f2c1e0954f5d0cf3260aecc4eaee5e4e9 | # Copyright 2022 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for calculating bath dynamics as outlined in [Gribben2021].
**[Gribben2021]**
D. Gribben, A. Strathearn, G. E. Fux, P. Kirton, and B. W. Lovett,
*Using the Environment to Understand non-Markovian Open Quantum Systems*,
arXiv:2106.04212 [quant-ph] (2021).
"""
from typing import Optional, Text, Tuple
import numpy as np
from numpy import ndarray
from oqupy.base_api import BaseAPIClass
from oqupy.process_tensor import BaseProcessTensor
from oqupy.bath import Bath
from oqupy.system import BaseSystem
from oqupy.config import NpDtype
from oqupy.contractions import compute_correlations
class TwoTimeBathCorrelations(BaseAPIClass):
"""
Class to facilitate calculation of two-time bath correlations.
Parameters
----------
system: BaseSystem
The system.
bath: Bath
The bath object containing all coupling information and temperature.
process_tensor: ProcessTensor
The corresponding process tensor calculated for the given bath.
initial_state: ndarray
Initial state of the system.
system_correlations: ndarray
Optional previously calculated system correlations. This must
be an upper triangular array with all ordered correlations up to a
certain time.
name: str
An optional name for the bath dynamics object.
description: str
An optional description of the bath dynamics object.
"""
def __init__(
self,
system: BaseSystem,
bath: Bath,
process_tensor: BaseProcessTensor,
initial_state: Optional[ndarray] = None,
system_correlations: Optional[ndarray] = None,
name: Optional[Text] = None,
description: Optional[Text] = None) -> None:
"""Create a TwoTimeBathCorrelations object."""
self._system = system
self._bath = bath
initial_tensor = process_tensor.get_initial_tensor()
assert (initial_state is None) ^ (initial_tensor is None), \
"Initial state must be either (exclusively) encoded in the " \
+ "process tensor or given as an argument."
self._process_tensor = process_tensor
self._initial_state = initial_state
if system_correlations is None:
self._system_correlations = np.array([[]], dtype=NpDtype)
else:
self._system_correlations = system_correlations
self._temp = bath.correlations.temperature
self._bath_correlations = {}
super().__init__(name, description)
@property
def system(self) -> BaseSystem:
"""The system. """
return self._system
@property
def bath(self) -> Bath:
"""The bath. """
return self._bath
@property
def initial_state(self) -> ndarray:
"""The initial system state. """
return self._initial_state
def generate_system_correlations(
self,
final_time: float,
progress_type: Optional[Text] = None) -> None:
r"""
Function to generate all ordered system correlations up to a given time
using the process tensor.
Parameters
----------
final_time: float
The latest time appearing in the generated system correlation
functions.
progress_type: str (default = None)
The progress report type during the computation. Types are:
{``silent``, ``simple``, ``bar``}. If `None` then
the default progress type is used.
"""
dt = self._process_tensor.dt
corr_mat_dim = int(np.round(final_time/dt))
current_corr_dim = self._system_correlations.shape[0]
times_a = slice(corr_mat_dim)
if self._system_correlations.size == 0:
times_b = slice(corr_mat_dim)
else:
times_b = slice(current_corr_dim, corr_mat_dim)
dim_diff = corr_mat_dim - current_corr_dim
if dim_diff > 0:
coup_op = self.bath.unitary_transform \
@ self.bath.coupling_operator \
@ self.bath.unitary_transform.conjugate().T
_,_,_new_sys_correlations = \
compute_correlations(self.system,
self._process_tensor,
coup_op, coup_op,
times_a, times_b,
initial_state = self.initial_state,
progress_type=progress_type)
self._system_correlations = np.pad(self._system_correlations,
((0, dim_diff), (0, 0)),
'constant',
constant_values = np.nan)
self._system_correlations = np.append(self._system_correlations,
_new_sys_correlations,
axis = 1)
def occupation(
self,
freq: float,
dw: Optional[float] = 1.0,
change_only: Optional[bool] = False,
progress_type: Optional[Text] = None) -> Tuple[ndarray, ndarray]:
r"""
Function to calculate the change in bath occupation in a particular
bandwidth.
Parameters
----------
freq: float
Central frequency of the frequency band.
dw: float
            Width of the frequency band. By default this method returns a
            *density* by setting the frequency band `dw=1.0`.
change_only: bool
            If True, return only the change in occupation and omit the
            initial (thermal) occupation density.
progress_type: str (default = None)
The progress report type during the computation. Types are:
{``silent``, ``simple``, ``bar``}. If `None` then
the default progress type is used.
Returns
-------
times: ndarray
Times of the occupation dynamics.
bath_occupation: ndarray
Occupation (density) (difference) of the bath in the specified
frequency band.
"""
corr_mat_dim = len(self._process_tensor)
dt = self._process_tensor.dt
last_time = corr_mat_dim * dt
tlist = np.arange(0, last_time+dt, dt)
if freq == 0:
return tlist, np.ones(len(tlist),
dtype=NpDtype) * (np.nan + 1.0j*np.nan)
self.generate_system_correlations(last_time, progress_type)
_sys_correlations = self._system_correlations[:corr_mat_dim,
:corr_mat_dim]
_sys_correlations = np.nan_to_num(_sys_correlations)
last_time = len(self._process_tensor) * self._process_tensor.dt
re_kernel, im_kernel = self._calc_kernel(freq, last_time,
freq, last_time, (1, 0))
coup = self._bath.correlations.spectral_density(freq) * dw
bath_occupation = np.cumsum(
np.sum(_sys_correlations.real*re_kernel \
+ 1j*_sys_correlations.imag*im_kernel, axis = 0)
).real * coup
bath_occupation = np.append([0], bath_occupation)
if not change_only and self._temp > 0:
bath_occupation += np.exp(-freq/self._temp) \
/ (1 - np.exp(-freq/self._temp))
return tlist, bath_occupation
def correlation(
self,
freq_1: float,
time_1: float,
freq_2: Optional[float] = None,
time_2: Optional[float] = None,
dw: Optional[tuple] = (1.0, 1.0),
dagg: Optional[tuple] = (1, 0),
interaction_picture: Optional[bool] = False,
change_only: Optional[bool] = False,
progress_type: Optional[Text] = None) -> complex:
r"""
Function to calculate two-time correlation function between two
frequency bands of a bath.
The calculation consists of a double integral of the form:
.. math::
\int_0^t \int_0^{t'} \left\{
\mathrm{Re} \langle O(t')O(t'') \rangle \, K_R(t',t'')
+ i \,\mathrm{Im} \langle O(t')O(t'') \rangle \, K_I(t',t'')
\right\} dt'' dt'
where :math:`O` is the system operator coupled to the bath and
:math:`K_R` and :math:`K_I` are generally piecewise kernels which
depend on the exact bath correlation function desired.
Parameters
----------
freq_1: float
Frequency of the earlier time operator.
time_1: float
Time the earlier operator acts.
freq_2: float
Frequency of the later time operator. If set to None will default
to freq_2=freq_1.
time_2: float
Time the later operator acts. If set to None will default to
time_2=time_1.
dw: tuple
            Width of the frequency bands. By default this method returns a
correlation *density* by setting the frequency bands to
`dw=(1.0, 1.0)`.
dagg: tuple
Determines whether each operator is daggered or not e.g. (1,0)
would correspond to :math:`< a^\dagger a >`.
interaction_picture: bool
Option whether to generate the result within the bath interaction
picture.
change_only: bool
            If True, return only the change and omit the initial (thermal)
            occupation contribution.
progress_type: str (default = None)
The progress report type during the computation. Types are:
{``silent``, ``simple``, ``bar``}. If `None` then
the default progress type is used.
Returns
-------
correlation : complex
Bath correlation function
<a^{dagg[0]}_{freq_2} (time_2) a^{dagg[1]}_{freq_1} (time_1)>
"""
dt = self._process_tensor.dt
if time_2 is None:
time_2 = time_1
if freq_2 is None:
freq_2 = freq_1
self.generate_system_correlations(time_2, progress_type)
corr_mat_dim = int(np.round(time_2/dt))
_sys_correlations = self._system_correlations[:corr_mat_dim,
:corr_mat_dim]
_sys_correlations = np.nan_to_num(_sys_correlations)
re_kernel,im_kernel = self._calc_kernel(freq_1, time_1,
freq_2, time_2, dagg)
coup_1 = dw[0] * self._bath.correlations.spectral_density(freq_1)**0.5
coup_2 = dw[1] * self._bath.correlations.spectral_density(freq_2)**0.5
correlation = np.sum(_sys_correlations.real*re_kernel + \
1j*_sys_correlations.imag*im_kernel) * \
coup_1 * coup_2
if (not change_only) and (freq_1 == freq_2) \
and (dagg in ((1, 0), (0, 1))):
if self._temp > 0:
correlation += np.exp(-freq_1/self._temp) \
/ (1 - np.exp(-freq_1/self._temp))
if dagg == (0, 1):
correlation += 1
if not interaction_picture:
correlation *= np.exp(1j * ((2*dagg[0] - 1) * freq_2 * time_2 + \
(2*dagg[1] - 1) * freq_1 * time_1))
return correlation
def _calc_kernel(self,
freq_1: float,
time_1: float,
freq_2: float,
time_2: float,
dagg: tuple
) -> Tuple[ndarray, ndarray]:
r"""
Function to calculate the corresponding kernel for the desired
correlation function.
Parameters
----------
freq_1 : float
Frequency of the earlier time operator.
time_1 : float
Time the earlier operator acts.
freq_2 : float
Frequency of the later time operator.
time_2 : float
Time the later operator acts.
dagg : tuple
Determines whether each operator is daggered or not e.g. (1,0)
would correspond to :math:`< a^\dagger a >`
Returns
-------
re_kernel : ndarray
An array that multiplies the real part of the system correlation
functions before being summed.
im_kernel : ndarray
An array that multiplies the imaginary part of the system
correlation functions before being summed.
The general structure of the kernel is piecewise and different for the
real and imaginary parts of the correlation function. To accommodate
the most general case we split the integrals up in the following way:
.. math::
            \int_0^t \int_0^{t'} = \int_0^{t_1} \int_0^{t'}+
\int_{t_1}^{t} \int_0^{t_1}+
\int_{t_1}^{t} \int_{t_1}^{t'}
where :math:`t_1` is the time the earlier operator acts. We will refer
to these as regions `a`, `b` and `c` in the code. In the actual
implementation we build the kernel for the full square integration
region and then simply keep the upper triangular portion of the matrix.
"""
dt = self._process_tensor.dt
#pieces of kernel consist of some combination of phases and
#Bose-Einstein factors
n_1, n_2 = 0, 0
if self._temp > 0:
n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))
n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))
ker_dim = int(np.round(time_2 / dt))
# calculate index corresponding to t_1
switch = int(np.round(time_1 / dt))
re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)
im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)
tpp_index, tp_index = np.meshgrid(
np.arange(ker_dim), np.arange(ker_dim),
indexing='ij') #array of indices for each array element
regions = {
'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)
'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)
'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)
def phase(region, swap_ts = False):
tk = tp_index[regions[region]]
tkp = tpp_index[regions[region]]
if tk.size == 0 or tkp.size == 0:
return 0
a = -1j * ((2*dagg[0] - 1)) * freq_2
b = -1j * ((2*dagg[1] - 1)) * freq_1
if swap_ts:
a, b = b, a
if region in ('a','c'):
ph = np.triu(
np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)
ph -= np.triu(
np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)
ph -= np.triu(
np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)
ph += np.triu(
np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)
sel = np.diag(tk)
di = -np.exp((a * (sel + 1) + b * sel) * dt) / (a * b)
if a + b != 0:
di += np.exp((a + b) * (sel + 1) * dt) / (b * (a+b))
di += np.exp((a + b) * sel * dt) / (a * (a+b))
else:
di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)
ph += np.diag(di)
else:
ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)
ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)
ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b)
ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)
return ph
if dagg == (0, 1):
re_kernel[regions['a']] = phase('a') + phase('a', 1)
re_kernel[regions['b']] = phase('b')
im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -
(2*n_2 + 1) * phase('a', 1))
im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')
im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')
elif dagg == (1, 0):
re_kernel[regions['a']] = phase('a') + phase('a', 1)
re_kernel[regions['b']] = phase('b')
im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -
(2*n_2 + 1) * phase('a', 1))
im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')
im_kernel[regions['c']] = 2 * n_1 * phase('c')
elif dagg == (1, 1):
re_kernel[regions['a']] = -(phase('a') + phase('a', 1))
re_kernel[regions['b']] = -phase('b')
im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +
(2*n_2 + 1) * phase('a', 1))
im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')
im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')
elif dagg == (0, 0):
re_kernel[regions['a']] = -(phase('a') + phase('a', 1))
re_kernel[regions['b']] = -phase('b')
im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +
(2*n_1 + 1) * phase('a'))
im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')
im_kernel[regions['c']] = -2 * n_1 * phase('c')
re_kernel = np.triu(re_kernel) #only keep triangular region
im_kernel = np.triu(im_kernel)
return re_kernel, im_kernel
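
# --- Editorial usage sketch (not part of the original module) ---------------
# Minimal illustration of how the class above might be driven. It assumes a
# compatible ``system`` (BaseSystem), ``bath`` (Bath) and ``process_tensor``
# (BaseProcessTensor) have already been constructed elsewhere with oqupy; only
# methods defined in this module are called.
def _example_two_time_bath_correlations(system, bath, process_tensor,
                                        initial_state=None):
    """Sketch: occupation dynamics and one two-time bath correlation."""
    bath_corr = TwoTimeBathCorrelations(
        system, bath, process_tensor, initial_state=initial_state)
    # Occupation (density) dynamics of the band centred at frequency 1.0.
    times, occupation = bath_corr.occupation(freq=1.0, dw=0.1)
    # Two-time correlation <a_{1.0}^dagger(t=2.0) a_{1.0}(t=1.0)>.
    corr = bath_corr.correlation(freq_1=1.0, time_1=1.0, time_2=2.0,
                                 dagg=(1, 0))
    return times, occupation, corr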
|
py | b406ac3eed1ba327060b05f34a41fea08ae1015a | """
NLP Sandbox API
NLP Sandbox REST API # noqa: E501
The version of the OpenAPI document: 1.2.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from nlpsandbox.api_client import ApiClient, Endpoint as _Endpoint
from nlpsandbox.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from nlpsandbox.model.deidentify_request import DeidentifyRequest
from nlpsandbox.model.deidentify_response import DeidentifyResponse
from nlpsandbox.model.error import Error
class DeidentifiedNoteApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_deidentified_notes(
self,
**kwargs
):
"""Deidentify a clinical note # noqa: E501
Returns the deidentified note # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_deidentified_notes(async_req=True)
>>> result = thread.get()
Keyword Args:
deidentify_request (DeidentifyRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DeidentifyResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.create_deidentified_notes = _Endpoint(
settings={
'response_type': (DeidentifyResponse,),
'auth': [],
'endpoint_path': '/deidentifiedNotes',
'operation_id': 'create_deidentified_notes',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'deidentify_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'deidentify_request':
(DeidentifyRequest,),
},
'attribute_map': {
},
'location_map': {
'deidentify_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_deidentified_notes
)
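
# --- Editorial usage sketch (not part of the generated client) --------------
# Illustrates a synchronous call to the endpoint wired up above. It assumes a
# DeidentifyRequest instance has been built elsewhere and that the default
# ApiClient configuration points at a reachable NLP Sandbox service.
def _example_create_deidentified_notes(deidentify_request):
    """Sketch: de-identify one clinical note, returning a DeidentifyResponse."""
    api = DeidentifiedNoteApi()  # uses a default ApiClient()
    return api.create_deidentified_notes(deidentify_request=deidentify_request)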
|
py | b406ad2c4561f94b7681640fab2763cb7e4b510f | from setuptools import setup
setup(
name='hail',
version='0.3b2',
py_modules=["hail"],
author='Elisey Zanko',
author_email='[email protected]',
description='Pythonic bindings for the Apache Storm UI REST API',
license='BSD-3-Clause',
url='https://github.com/31z4/hail',
test_suite='tests',
tests_require='waiting'
)
|
py | b406ad31cc6f7e3fc7bb1376e33b298352fe3590 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test uncertainty toolbox """
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as CV
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore import dtype as mstype
from mindspore.common.initializer import TruncatedNormal
from mindspore.dataset.vision import Inter
from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation
from mindspore.train import load_checkpoint, load_param_into_net
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
"""weight initial for conv layer"""
weight = weight_variable()
return nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
weight_init=weight, has_bias=False, pad_mode="valid")
def fc_with_initialize(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
class LeNet5(nn.Cell):
def __init__(self, num_class=10, channel=1):
super(LeNet5, self).__init__()
self.num_class = num_class
self.conv1 = conv(channel, 6, 5)
self.conv2 = conv(6, 16, 5)
self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
self.fc2 = fc_with_initialize(120, 84)
self.fc3 = fc_with_initialize(84, self.num_class)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.flatten = nn.Flatten()
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
def create_dataset(data_path, batch_size=32, repeat_size=1,
num_parallel_workers=1):
"""
create dataset for train or test
"""
# define dataset
mnist_ds = ds.MnistDataset(data_path)
resize_height, resize_width = 32, 32
rescale = 1.0 / 255.0
shift = 0.0
rescale_nml = 1 / 0.3081
shift_nml = -1 * 0.1307 / 0.3081
# define map operations
resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode
rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
rescale_op = CV.Rescale(rescale, shift)
hwc2chw_op = CV.HWC2CHW()
type_cast_op = C.TypeCast(mstype.int32)
# apply map operations on images
mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
# apply DatasetOps
buffer_size = 10000
mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
mnist_ds = mnist_ds.repeat(repeat_size)
return mnist_ds
if __name__ == '__main__':
# get trained model
network = LeNet5()
param_dict = load_checkpoint('checkpoint_lenet.ckpt')
load_param_into_net(network, param_dict)
# get train and eval dataset
ds_train = create_dataset('workspace/mnist/train')
ds_eval = create_dataset('workspace/mnist/test')
evaluation = UncertaintyEvaluation(model=network,
train_dataset=ds_train,
task_type='classification',
num_classes=10,
epochs=1,
epi_uncer_model_path=None,
ale_uncer_model_path=None,
save_model=False)
for eval_data in ds_eval.create_dict_iterator(output_numpy=True, num_epochs=1):
eval_data = Tensor(eval_data['image'], mstype.float32)
epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)
|
py | b406ad612404e95430cba12afa236665470ceb7b | # -*- coding: utf-8 -*-
#
# Poio Tools for Linguists
#
# Copyright (C) 2009-2013 Poio Project
# Author: António Lopes <[email protected]>
# URL: <http://media.cidles.eu/poio/>
# For license information, see LICENSE.TXT
"""
"""
from __future__ import absolute_import
import re
import xml.etree.ElementTree as ET
import poioapi.io.graf
class Parser(poioapi.io.graf.BaseParser):
def __init__(self, filepath):
"""Class's constructor.
Parameters
----------
filepath : str
Path of the Toolbox XML file.
"""
self.filepath = filepath
self.parse()
def parse(self):
"""This method will parse the input file.
"""
root = ET.parse(self.filepath)
tree = root.getroot()
self._current_id = 0
self._elements_map = {"ref": [], "t": {}, "m": {},
"g": {}, "p": {}, "f": {}}
self.parse_element_tree(tree)
def parse_element_tree(self, tree):
"""
        The "name" tag's value holds the title (meta information); each
        "ref" element starts a new record whose text is accumulated from
        its child "t" elements.
"""
for t in tree:
if t.tag == "ref":
self._current_ref = t.attrib['value']
self._elements_map["ref"].append({"id":self._current_ref, "value":""})
elif t.tag == "t":
self._current_t = self._next_id()
self._add_elment_to_elements(t, self._current_t, self._current_ref,
t.attrib['value'])
self._add_phrase(t.attrib['value'])
elif t.tag == "p":
if t.text and "-" not in t.text:
self._add_elment_to_elements(t, self._next_id(), self._current_t,
t.text)
elif t.tag == "m":
self._current_m = self._next_id()
self._add_elment_to_elements(t, self._current_m, self._current_t,
t.attrib['value'])
elif t.tag == "g":
self._add_elment_to_elements(t, self._next_id(), self._current_m, t.text)
elif t.tag == "name":
self.meta_information = t.attrib["value"]
if len(t.getchildren()) > 0:
self.parse_element_tree(t)
def _add_phrase(self, value):
for ref in self._elements_map["ref"]:
if ref["id"] == self._current_ref:
ref["value"] += value + " "
def _add_elment_to_elements(self, t, id, parent=None, value=None, features=None, region=None):
if (t.tag, parent) in self._elements_map:
self._elements_map[(t.tag, parent)].append(
{"id": id, "value": value, "region": region, "features": features})
else:
self._elements_map[(t.tag, parent)] = [{"id": id, "value": value,
"region": region,
"features": features}]
def get_root_tiers(self):
return [poioapi.io.graf.Tier("ref")]
def get_child_tiers_for_tier(self, tier):
if tier.name == "ref":
return [poioapi.io.graf.Tier("t")]
if tier.name == "t":
return [poioapi.io.graf.Tier("p"),
poioapi.io.graf.Tier("m")]
if tier.name == "m":
return [poioapi.io.graf.Tier("g")]
def get_annotations_for_tier(self, tier, annotation_parent=None):
if tier.name == "ref":
return [poioapi.io.graf.Annotation(e["id"], e['value'])
for e in self._elements_map[tier.name]]
else:
if (tier.name, annotation_parent.id) in self._elements_map:
return [poioapi.io.graf.Annotation(e["id"], e["value"],
e["features"])
for e in self._elements_map[(tier.name, annotation_parent.id)]]
else:
return []
def tier_has_regions(self, tier):
#if tier.name == "t":
# return True
return False
def region_for_annotation(self, annotation):
idGroup = [value for key, value in self._elements_map.items()
if "idGroup" in key]
for elements in idGroup:
for e in elements:
if e["id"] == annotation.id:
return e["region"]
return None
def get_primary_data(self):
"""This method gets the information about
the source data file.
Returns
-------
primary_data : object
PrimaryData object.
"""
primary_data = poioapi.io.graf.PrimaryData()
primary_data.type = poioapi.io.graf.NONE
primary_data.filename = "unknown"
return primary_data
def _next_id(self):
current_id = str(int(self._current_id) + 1)
self._current_id = current_id
return current_id
def _split_region(self, element):
try:
aud = element.find("aud").text
            results = re.findall(r"\d*\.\d+|\d+", aud)
region = (results[-2], results[-1])
value = aud.split(results[-2])[0]
except:
value = None
region = None
return value, region |
py | b406adc1ea4947d7c86c7cc1b9eb58f49f219f0a | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NumPy like wrapper for Tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import newaxis
from trax.tf_numpy.numpy import random
# pylint: disable=wildcard-import
from trax.tf_numpy.numpy.array_creation import *
from trax.tf_numpy.numpy.array_manipulation import *
from trax.tf_numpy.numpy.array_methods import *
from trax.tf_numpy.numpy.arrays import ndarray
from trax.tf_numpy.numpy.dtypes import *
from trax.tf_numpy.numpy.logic import *
from trax.tf_numpy.numpy.math import *
from trax.tf_numpy.numpy.utils import finfo
# pylint: enable=wildcard-import
|
py | b406b128e24a3321bc45287a84d0ef982e206f16 | # qubit number=5
# total number=54
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=48
prog.z(input_qubit[1]) # number=49
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.h(input_qubit[0]) # number=32
prog.cz(input_qubit[1],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=34
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=41
prog.z(input_qubit[3]) # number=42
prog.cx(input_qubit[3],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[3]) # number=44
prog.cx(input_qubit[3],input_qubit[2]) # number=45
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.x(input_qubit[3]) # number=46
prog.y(input_qubit[1]) # number=47
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1380.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | b406b188e356e4371a37e7cf683042a04e01ced3 | from setuptools import setup
setup(
name="pytodotxt",
version="0.1",
author="senft",
author_email="[email protected]",
description=("A simple parser for todo.txt files."),
license="GPL",
url="https://github.com/senft/pytodotxt",
py_modules=['pytodotxt'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
],
)
|
py | b406b1db25c9667b17737e491871919ee6325496 | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pytest
def model_path(is_myriad=False):
path_to_repo = os.environ["MODELS_PATH"]
if not is_myriad:
test_xml = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp32.xml')
test_bin = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp32.bin')
else:
test_xml = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp16.xml')
test_bin = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp16.bin')
return (test_xml, test_bin)
def model_onnx_path():
path_to_repo = os.environ["MODELS_PATH"]
test_onnx = os.path.join(path_to_repo, "models", "test_model", 'test_model.onnx')
return test_onnx
def model_prototxt_path():
path_to_repo = os.environ["MODELS_PATH"]
test_prototxt = os.path.join(path_to_repo, "models", "test_model", 'test_model.prototxt')
return test_prototxt
def image_path():
path_to_repo = os.environ["DATA_PATH"]
path_to_img = os.path.join(path_to_repo, 'validation_set', '224x224', 'dog.bmp')
return path_to_img
def plugins_path():
path_to_repo = os.environ["DATA_PATH"]
plugins_xml = os.path.join(path_to_repo, 'ie_class', 'plugins.xml')
plugins_win_xml = os.path.join(path_to_repo, 'ie_class', 'plugins_win.xml')
plugins_osx_xml = os.path.join(path_to_repo, 'ie_class', 'plugins_apple.xml')
return (plugins_xml, plugins_win_xml, plugins_osx_xml)
@pytest.fixture(scope='session')
def device():
return os.environ.get("TEST_DEVICE") if os.environ.get("TEST_DEVICE") else "CPU"
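
# --- Editorial usage sketch (not part of the original helpers) --------------
# Shows how a test in this suite might combine the path helpers with the
# ``device`` fixture above. The assertions only check that the referenced
# model files exist; a real test would go on to load them.
def _example_model_files_exist(device):
    test_xml, test_bin = model_path(is_myriad=(device == "MYRIAD"))
    assert os.path.isfile(test_xml)
    assert os.path.isfile(test_bin)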
|
py | b406b296f0e632b714fe31999ac7ea5da224c845 | import tensorflow as tf
from keras import keras_parameterized, testing_utils
from ..upconv import UpConvBlock
@keras_parameterized.run_all_keras_modes
class TestUpConvBlock(keras_parameterized.TestCase):
def test_layer(self):
testing_utils.layer_test(
UpConvBlock,
kwargs={'filters': 1, 'up_scale': 2},
input_shape=[2, 16, 16, 3],
input_dtype='float32',
expected_output_shape=[None, 64, 64, 1],
expected_output_dtype='float32'
)
testing_utils.layer_test(
UpConvBlock,
kwargs={'filters': 2, 'up_scale': 2},
input_shape=[2, 16, 16, 3],
input_dtype='float32',
expected_output_shape=[None, 64, 64, 2],
expected_output_dtype='float32'
)
if __name__ == '__main__':
tf.test.main()
|
py | b406b468a3f86c882208333f55d77945ebdaf72f | # Copyright 2015 PLUMgrid, Inc. All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutronclient.common import extension
from neutronclient.i18n import _
class TransitDomain(extension.NeutronClientExtension):
resource = 'transit_domain'
resource_plural = 'transit_domains'
path = 'transit-domains'
object_path = '/%s' % path
resource_path = '/%s/%%s' % path
versions = ['2.0']
def args2body(self, parsed_args):
try:
if parsed_args.name:
tvd_name = parsed_args.name
body = {'transit_domain': {'name': tvd_name}}
else:
body = {'transit_domain': {}}
return body
except KeyError as err:
raise Exception("KeyError: " + str(err))
class TransitDomainCreate(extension.ClientExtensionCreate,
TransitDomain):
"""Create a transit domain."""
shell_command = 'transit-domain-create'
def add_known_arguments(self, parser):
parser.add_argument(
'name', metavar='<TRANSIT-DOMAIN-NAME>',
help=_('Descriptive name for transit domain.'))
def args2body(self, parsed_args):
        body = TransitDomain.args2body(self, parsed_args)
if parsed_args.tenant_id:
(body['transit_domain']
['tenant_id']) = parsed_args.tenant_id
return body
class TransitDomainList(extension.ClientExtensionList,
TransitDomain):
"""List transit domains"""
shell_command = 'transit-domain-list'
list_columns = ['id', 'name']
pagination_support = True
sorting_support = True
class TransitDomainShow(extension.ClientExtensionShow,
TransitDomain):
"""Show information of a given transit domain"""
shell_command = 'transit-domain-show'
class TransitDomainDelete(extension.ClientExtensionDelete,
TransitDomain):
"""Delete a given transit domain"""
shell_command = 'transit-domain-delete'
class TransitDomainUpdate(extension.ClientExtensionUpdate,
TransitDomain):
"""Update a given transit domain"""
shell_command = 'transit-domain-update'
def add_known_arguments(self, parser):
parser.add_argument(
'--name', metavar='name',
help=_('Descriptive name for transit domain'))
def args2body(self, parsed_args):
body = {'transit_domain': {'name': parsed_args.name}}
return body
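
# --- Editorial usage note (not part of the original extension) --------------
# When this module is installed as a python-neutronclient extension, the
# ``shell_command`` attributes above map onto CLI commands roughly as follows
# (command names are taken verbatim from the classes; arguments follow the
# add_known_arguments definitions):
#
#   neutron transit-domain-create <TRANSIT-DOMAIN-NAME>
#   neutron transit-domain-list
#   neutron transit-domain-show <TRANSIT-DOMAIN>
#   neutron transit-domain-update <TRANSIT-DOMAIN> --name <NEW-NAME>
#   neutron transit-domain-delete <TRANSIT-DOMAIN>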
|
py | b406b50728d8af859c9f484f0607c64b739e8127 | from password_generator.Database.database import Database
from datetime import date
class My_Space:
def __init__(self):
self.d = Database()
def create_space(self, username):
flag = True
if flag:
query = "create table `my_space`.`{}`(account_name varchar(45) primary key not null, user_pass varchar(120) not null, date varchar(45));".format(username)
db = self.d.connect()
result = self.d.execute(query)
self.d.close(db)
return flag
def add_password(self, username, account_name, user_pass):
#insert password into the database
query = "insert into `my_space`.`{}` values('{}', '{}', '{}');".format(username, account_name, user_pass.decode(), str(date.today()))
db = self.d.connect()
result = self.d.execute(query)
self.d.close(db)
return result
def show_passwords(self, username):
query = "select * from `my_space`.`{}`;".format(username)
db = self.d.connect()
result = self.d.fetch_multiple(query)
self.d.close(db)
return result
def delete_passwords(self, username, account_name):
query = "delete from `my_space`.`{}` where account_name='{}';".format(username, account_name)
db = self.d.connect()
result = self.d.execute(query)
self.d.close(db)
return result
#Driver Code
#m = My_Space()
#username, account_name, user_pass = '', '', ''
#print(m.create_space(username))
#print(m.add_password(username, account_name, user_pass))
#print(m.show_passwords(username))
#print(m.delete_passwords(username, account_name)) |
py | b406b6625a11c65302e42ad8e15f15275b17895a | import re
from nlputils import *
from udb import *
###############################################################################
### TDT4 internal format:
###
### format = 'TDT4'
### bmeta = { sfile, ... }
### chunks = []
### chunk = [ text, cmeta={ stype,slang,sorg,date,btopic,ntopic, ... } ]
###
### Notes:
### 1. text: text contained in the <TEXT>..</TEXT> element
### 2. cmeta{}: dictionary for storing chunk meta-data
### 3. bmeta{}: dictionary for storing bundle meta-data
###
###############################################################################
### regex templates to extract fields from TDT4 source data
RX_DOC = "<DOC>(.*?)</DOC>"
RX_STYPE = "<SOURCE_TYPE>(.*?)</SOURCE_TYPE>"
RX_SLANG = "<SOURCE_LANG>(.*?)</SOURCE_LANG>"
RX_SORG = "<SOURCE_ORG>(.*?)</SOURCE_ORG>"
RX_DDATE = "<DOC_DATE>(.*?)</DOC_DATE>"
RX_BTOPIC = "<BROAD_TOPIC>(.*?)</BROAD_TOPIC>"
RX_NTOPIC = "<NARROW_TOPIC>(.*?)</NARROW_TOPIC>"
RX_TEXT = "<TEXT>(.*?)</TEXT>"
###############################################################################
### Parse TDT4 source content and return as a UDB encapsulated TDT4 object
###############################################################################
def parse_to_udb (src_data, src_mdata, options):
# options are unused at this time
bundle = udb()
bundle.format = 'TDT4' # TDT4 internal format
bundle.bmeta = src_mdata
### extract chunks and meta data and insert into UDB bundle
### interate to extract DOC elements
rx_doc = re.compile(RX_DOC,re.DOTALL)
iter = rx_doc.finditer(src_data)
for match in iter:
doc = match.group(1)
chunk=[]
cmeta = {} # cmeta dictionary
### find SOURCE_TYPE element
rx_stype = re.compile(RX_STYPE, re.DOTALL)
stype = rx_stype.search(doc)
if stype != None:
cmeta['stype'] = stype.group(1)
else:
cmeta['stype'] = None
print "Warning: SOURCE_TYPE missing in DOC"
### find SOURCE_LANG element
rx_slang = re.compile(RX_SLANG, re.DOTALL)
slang = rx_slang.search(doc)
if slang != None:
cmeta['slang'] = slang.group(1)
else:
cmeta['slang'] = None
print "Warning: SOURCE_LANG missing in DOC"
### find SOURCE_ORG element
rx_sorg = re.compile(RX_SORG, re.DOTALL)
sorg = rx_sorg.search(doc)
if sorg != None:
cmeta['sorg'] = sorg.group(1)
else:
cmeta['sorg'] = None
print "Warning: SOURCE_ORG missing in DOC"
### find DOC_DATE element
rx_ddate = re.compile(RX_DDATE, re.DOTALL)
ddate = rx_ddate.search(doc)
if ddate != None:
cmeta['ddate'] = ddate.group(1)
else:
cmeta['ddate'] = None
print "Warning: DOC_DATE missing in DOC"
### find BROAD_TOPIC element
rx_btopic = re.compile(RX_BTOPIC, re.DOTALL)
btopic = rx_btopic.search(doc)
if btopic != None:
cmeta['btopic'] = btopic.group(1)
else:
cmeta['btopic'] = None
print "Warning: BROAD_TOPIC missing in DOC"
### find NARROW_TOPIC element
rx_ntopic = re.compile(RX_NTOPIC, re.DOTALL)
ntopic = rx_ntopic.search(doc)
if ntopic != None:
cmeta['ntopic'] = ntopic.group(1)
else:
cmeta['ntopic'] = None
print "Warning: NARROW_TOPIC missing in DOC"
### find TEXT element
rx_text = re.compile(RX_TEXT, re.DOTALL)
text = rx_text.search(doc)
if text != None:
chunk_text = text.group(1)
else:
chunk_text = None
print "Warning: TEXT missing in DOC"
chunk.append(chunk_text)
chunk.append(cmeta)
bundle.chunks.append(chunk)
return bundle
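
###############################################################################
### Editorial usage sketch (not part of the original module): assumes the raw
### TDT4 text has already been read into src_data and that src_mdata carries
### at least the source-file entry expected in the bundle meta-data.
###############################################################################
def example_parse_tdt4(src_data, src_mdata=None, options=None):
    if src_mdata is None:
        src_mdata = {'sfile': 'unknown'}
    bundle = parse_to_udb(src_data, src_mdata, options)
    # each chunk is [text, cmeta] with cmeta keys such as 'stype' and 'ddate'
    return bundle.chunks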
|
py | b406b6d318db4606becb6d034b399e6a30e5efdb | """
1) Resize the short side of every image to 256 while keeping the aspect ratio unchanged
2) Compute the mean and variance of the training set (skipped here; after normalization 0.5 and 0.5 are used instead)
"""
import os
import cv2
#### Set up paths ####
root_dir = "/root/fly2fly/median_compute/flick30k"
img_dir = "flickr30k-images"
text_file = 'results_20130124.token'
train_fid = 'flick30k_train.txt'
savepath = 'flick30k_image_256'
def getfullpath(subdir):
return os.path.join(root_dir,subdir)
#### Initialize the image lists ####
train_len = 29784
val_len = 1000
test_len = 1000
imglist = os.listdir(getfullpath(img_dir))
imglist.sort()
train_list = imglist[0:train_len]
val_list = imglist[train_len:train_len + val_len]
test_list = imglist[-test_len-1:-1]
#### Preprocess the data ####
# 1) Resize the images
if not os.path.exists(getfullpath(savepath)):
os.makedirs(getfullpath(savepath))
s = 256
for name in imglist:
fullpath = os.path.join(getfullpath(img_dir), name)
img = cv2.imread(fullpath)
h,w,c = img.shape
if h < w:
rate = s / h
# (w , h)
img = cv2.resize(img,(round(rate*w), s), interpolation = cv2.INTER_CUBIC)
else:
rate = s / w
img = cv2.resize(img,(s, round(rate*h) ), interpolation = cv2.INTER_CUBIC)
cv2.imwrite(os.path.join(getfullpath(savepath), name),img)
|
py | b406b6eb1a47e833ed11287ac532c4b18d596b58 | from .page import *
time.sleep(2)
def case_5_2(self, full_screen):
    self.page.loger('\n Starting test case No. 5_2 tvweb_new-5_2: checking the user pop-up and personal account elements \n')
emailt = '[email protected]'
passw = '111111'
time.sleep(2)
self.page.click_f('Клик_Вход', 1)
time.sleep(1)
#self.page.send_f('Ввод_логина_вход', emailt, 2)
self.driver.find_element_by_xpath('.//input[@class="authorization__login textbox"]').send_keys(emailt)
time.sleep(2)
#self.page.send_f('Ввод_пароля_вход', passw, 3)
self.driver.find_element_by_xpath('.//input[@class="authorization__password textbox"]').send_keys(passw)
time.sleep(2)
self.page.click_f('Клик_Войти_auth', 4)
time.sleep(2)
self.prof.click_f('Клик_значок_пользователя', 5)
time.sleep(2)
    self.driver.find_element_by_xpath('.//span[@class="__userbalance currency- currency currency-RUB"]').click() # Click on the wallet
    self.page.loger('Step 6. Click on the wallet')
time.sleep(7)
self.page.waitForElementVisible('.//div[@class="cabinet__content cabinet-account"]', 30) # Проверка перехода и содержание страницы
res_txt = str(self.result.find_link("div", "cabinet__content cabinet-account"))
assert('Баланс') in res_txt
assert('Пополнить счёт') in res_txt
assert('Программа лояльности') in res_txt
assert('Промокод') in res_txt
assert('Бонусная программа') in res_txt
    self.page.loger('Navigation to the wallet and the page contents confirmed')
time.sleep(3)
    self.page.loger('Step 7. Check the payment history')
    self.driver.find_element_by_xpath('.//button[@class="cabinet-balance__history button button_light button_stretched"]').click() # Check the payment history window
time.sleep(4)
self.page.waitForElementVisible('.//div[@class="modal__content payment-history js-modal-content modal__content_open"]', 30) # Проверка появления окна
res_txt = str(self.result.find_link("div", "modal__content payment-history js-modal-content modal__content_open"))
assert('История платежей') in res_txt
    self.page.loger('Appearance of the payment history window confirmed')
time.sleep(3)
    self.page.waitForElementVisible('.//td[@class="payment-history__cell payment-history__cell_description"]', 30) # Verify a payment is present
    self.page.loger('Presence of a payment in the history confirmed')
time.sleep(3)
    self.page.loger('Step 8. Click on purchases')
    self.driver.find_element_by_xpath('.//button[@data-sort-type="2"]').click() # Click on purchases
time.sleep(3)
    self.page.waitForElementVisible('.//td[@class="payment-history__cell payment-history__cell_description"]', 30) # Verify a payment is present
    self.page.loger('Presence of payments under purchases confirmed')
time.sleep(3)
    self.page.loger('Step 9. Click on top-ups')
    self.driver.find_element_by_xpath('.//button[@data-sort-type="1"]').click() # Click on top-ups
time.sleep(3)
    self.page.waitForElementVisible('.//td[@class="payment-history__cell payment-history__cell_description"]', 30) # Verify top-ups are present
    self.page.loger('Presence of account top-ups confirmed')
time.sleep(3)
self.driver.find_element_by_xpath('.//button[@class="modal__close"]').click() # Клик на крестик
time.sleep(3)
self.prof.click_f('Клик_значок_пользователя', 10)
time.sleep(3)
    self.page.loger('Step 11. Click on the account')
    self.driver.find_element_by_xpath('.//a[@href="/profile/#tab=cabinet-account"]').click() # Click on the account
time.sleep(6)
res_txt = str(self.result.find_link("div", "cabinet__content cabinet-account"))
assert('Баланс') in res_txt
    self.page.loger('Navigation to the account confirmed')
time.sleep(3)
    self.page.loger('Step 12. Click on "Learn more" in the loyalty program')
    self.driver.find_element_by_xpath('.//a[@class="cabinet-loyalty__about button button_light button_stretched"]').click() # Click on "learn more" for the loyalty program
time.sleep(4)
    self.page.waitForElementVisible('.//h1[@class="loyalty__heading heading-1"]', 30) # Verify navigation to the loyalty program page
    self.page.loger('Navigation to the loyalty program page confirmed')
#self.driver.back()
time.sleep(5)
self.prof.click_f('Клик_значок_пользователя', 13)
time.sleep(3)
    self.page.loger('Step 14. Click on the bonus program')
    self.driver.find_element_by_xpath('.//a[@href="/loyalty/"]').click() # Click on the bonus program from the user pop-up
time.sleep(3)
self.page.waitForElementVisible('.//div[@class="loyalty__rewards loyalty-rewards"]', 10)
    self.page.loger('Navigation to the bonus program page confirmed')
self.prof.click_f('Клик_значок_пользователя', 15)
time.sleep(3)
    self.page.loger('Step 16. Click on subscriptions')
    self.driver.find_element_by_xpath('.//a[@href="/profile/#tab=cabinet-subscriptions"]').click() # Click on subscriptions
time.sleep(5)
    self.page.waitForElementVisible('.//h2[@class="cabinet__heading heading-2"][contains(., "Подписки")]', 30) # Verify the "Подписки" (Subscriptions) heading is present
    self.page.waitForElementVisible('.//div[@class="cabinet-subscriptions__item subscription-card"]', 30) # Verify subscription items are present
    self.page.loger('Navigation to the subscriptions tab confirmed')
time.sleep(2)
self.prof.click_f('Клик_значок_пользователя', 17)
time.sleep(3)
    self.page.loger('Step 18. Verify navigation to the "My films" page/section')
    self.driver.find_element_by_xpath('.//a[@href="/profile/#tab=cabinet-clips"]').click() # Click on "My films"
time.sleep(3)
    # Verify navigation and page contents
    self.page.waitForElementVisible('.//div[@class="selection__heading heading-2"][contains(., "Купленные фильмы")]', 30) # purchased films
time.sleep(2)
self.page.waitForElementVisible('.//div[@class="selection__heading heading-2"][contains(., "Избранное")]', 30) # Избранное
time.sleep(2)
self.page.waitForElementVisible('.//div[@class="selection__heading heading-2"][contains(., "История просмотра")]', 30) # История просмотра
self.page.loger('Переход в "Мои фильмы" и содежание страницы подтверждено')
time.sleep(2)
self.prof.click_f('Клик_значок_пользователя', 19)
time.sleep(3)
self.driver.find_element_by_xpath('.//a[@href="/profile/#tab=cabinet-settings"]').click() # Клик настройки
time.sleep(4)
self.page.waitForElementVisible('.//h2[@class="cabinet__heading heading-2"][contains(., "Личные данные")]', 30)
time.sleep(1)
self.page.waitForElementVisible('.//div[@class="cabinet-information__label"]', 30)
time.sleep(1)
self.page.waitForElementVisible('.//h2[@class="cabinet__heading heading-2"][contains(., "Смена пароля")]', 30)
self.page.loger('Переход на страницу "Настройки" и содержание страницы подтверждено')
time.sleep(1)
self.prof.click_f('Клик_значок_пользователя', 20)
time.sleep(3)
self.driver.find_element_by_xpath('.//a[@href="/profile/#tab=cabinet-devices"]').click() # Клик Мои устройства
time.sleep(3)
self.page.waitForElementVisible('.//div[@class="cabinet-binding__heading cabinet__heading heading-2"]', 30)
time.sleep(2)
self.page.waitForElementVisible('.//div[@class="cabinet-binding__subheading subheading-1"]', 30)
self.page.loger('Переход на страницу "Мои устройства" и содержание страницы подтверждено')
time.sleep(2)
self.driver.quit() |
py | b406b7608a6448b8e25216b210fa36bf14e9de2f | from __future__ import print_function
try:
from minio import Minio
from minio.error import ResponseError
except ImportError:
print('This test requires minio: perhaps try pip install minio')
exit()
import commands
import datetime
import os
import platform
import random
import re
import shutil
import string
import subprocess
import urllib3
from resource_suite_s3_nocache import Test_S3_NoCache_Base
import sys
if sys.version_info >= (2,7):
import unittest
else:
import unittest2 as unittest
from .. import lib
from . import session
from ..configuration import IrodsConfig
from .resource_suite import ResourceSuite
from .test_chunkydevtest import ChunkyDevTest
class Test_S3_Cache_Base(ResourceSuite, ChunkyDevTest):
def __init__(self, *args, **kwargs):
"""Set up the cache test."""
# if self.proto is defined use it else default to HTTPS
if not hasattr(self, 'proto'):
self.proto = 'HTTPS'
# if self.archive_naming_policy is defined use it
# else default to 'consistent'
if not hasattr(self, 'archive_naming_policy'):
self.archive_naming_policy = 'consistent'
super(Test_S3_Cache_Base, self).__init__(*args, **kwargs)
def setUp(self):
# skip ssl tests on ub12
distro_str = ''.join(platform.linux_distribution()[:2]).replace(' ','').replace('.', '')
if self._testMethodName.startswith('test_ssl') and distro_str.lower().startswith('ubuntu12'):
self.skipTest("skipping ssl tests on ubuntu 12")
# set up aws configuration
self.read_aws_keys()
# set up s3 bucket
try:
httpClient = urllib3.poolmanager.ProxyManager(
os.environ['http_proxy'],
timeout=urllib3.Timeout.DEFAULT_TIMEOUT,
cert_reqs='CERT_REQUIRED',
retries=urllib3.Retry(
total=5,
backoff_factor=0.2,
status_forcelist=[500, 502, 503, 504]
)
)
except KeyError:
httpClient = None
if self.proto == 'HTTPS':
s3_client = Minio(self.s3endPoint,
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key,
http_client=httpClient,
region=self.s3region)
else:
s3_client = Minio(self.s3endPoint,
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key,
http_client=httpClient,
region=self.s3region,
secure=False)
if hasattr(self, 'static_bucket_name'):
self.s3bucketname = self.static_bucket_name
else:
self.s3bucketname = 'irods-ci-' + distro_str + datetime.datetime.utcnow().strftime('-%Y-%m-%d%H-%M-%S-%f-')
self.s3bucketname += ''.join(random.choice(string.letters) for i in xrange(10))
self.s3bucketname = self.s3bucketname[:63].lower() # bucket names can be no more than 63 characters long
s3_client.make_bucket(self.s3bucketname, location=self.s3region)
# set up resources
hostname = lib.get_hostname()
s3params = 'S3_RETRY_COUNT=15;S3_WAIT_TIME_SEC=1;S3_PROTO=%s;S3_MPU_CHUNK=10;S3_MPU_THREADS=4;S3_ENABLE_MD5=1' % self.proto
s3params += ';S3_STSDATE=' + self.s3stsdate
s3params += ';S3_DEFAULT_HOSTNAME=' + self.s3endPoint
s3params += ';S3_AUTH_FILE=' + self.keypairfile
s3params += ';S3_REGIONNAME=' + self.s3region
s3params += ';ARCHIVE_NAMING_POLICY=' + self.archive_naming_policy
if hasattr(self, 's3sse'):
s3params += ';S3_SERVER_ENCRYPT=' + str(self.s3sse)
s3params=os.environ.get('S3PARAMS', s3params);
with session.make_session_for_existing_admin() as admin_session:
irods_config = IrodsConfig()
admin_session.assert_icommand("iadmin modresc demoResc name origResc", 'STDOUT_SINGLELINE', 'rename', input='yes\n')
admin_session.assert_icommand("iadmin mkresc demoResc compound", 'STDOUT_SINGLELINE', 'compound')
admin_session.assert_icommand("iadmin mkresc cacheResc 'unixfilesystem' " + hostname + ":" + irods_config.irods_directory + "/cacheRescVault", 'STDOUT_SINGLELINE', 'cacheResc')
admin_session.assert_icommand('iadmin mkresc archiveResc s3 '+hostname+':/'+self.s3bucketname+'/irods/Vault "'+s3params+'"', 'STDOUT_SINGLELINE', 'archiveResc')
admin_session.assert_icommand("iadmin addchildtoresc demoResc cacheResc cache")
admin_session.assert_icommand("iadmin addchildtoresc demoResc archiveResc archive")
super(Test_S3_Cache_Base, self).setUp()
def tearDown(self):
super(Test_S3_Cache_Base, self).tearDown()
# delete s3 bucket
try:
httpClient = urllib3.poolmanager.ProxyManager(
os.environ['http_proxy'],
timeout=urllib3.Timeout.DEFAULT_TIMEOUT,
cert_reqs='CERT_REQUIRED',
retries=urllib3.Retry(
total=5,
backoff_factor=0.2,
status_forcelist=[500, 502, 503, 504]
)
)
except KeyError:
httpClient = None
if self.proto == 'HTTPS':
s3_client = Minio(self.s3endPoint,
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key,
http_client=httpClient,
region=self.s3region)
else:
s3_client = Minio(self.s3endPoint,
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key,
http_client=httpClient,
region=self.s3region,
secure=False)
objects = s3_client.list_objects_v2(self.s3bucketname, recursive=True)
if not hasattr(self, 'static_bucket_name'):
s3_client.remove_bucket(self.s3bucketname)
# tear down resources
with session.make_session_for_existing_admin() as admin_session:
admin_session.assert_icommand("iadmin rmchildfromresc demoResc archiveResc")
admin_session.assert_icommand("iadmin rmchildfromresc demoResc cacheResc")
admin_session.assert_icommand("iadmin rmresc archiveResc")
admin_session.assert_icommand("iadmin rmresc cacheResc")
admin_session.assert_icommand("iadmin rmresc demoResc")
admin_session.assert_icommand("iadmin modresc origResc name demoResc", 'STDOUT_SINGLELINE', 'rename', input='yes\n')
shutil.rmtree(IrodsConfig().irods_directory + "/cacheRescVault", ignore_errors=True)
def read_aws_keys(self):
# read access keys from keypair file
with open(self.keypairfile) as f:
self.aws_access_key_id = f.readline().rstrip()
self.aws_secret_access_key = f.readline().rstrip()
# read the endpoint address from the file endpointfile
@staticmethod
def read_endpoint(endpointfile):
# read endpoint file
with open(endpointfile) as f:
return f.readline().rstrip()
def test_irm_specific_replica(self):
self.admin.assert_icommand("ils -L "+self.testfile,'STDOUT_SINGLELINE',self.testfile) # should be listed
self.admin.assert_icommand("irepl -R "+self.testresc+" "+self.testfile) # creates replica
self.admin.assert_icommand("ils -L "+self.testfile,'STDOUT_SINGLELINE',self.testfile) # should be listed twice
self.admin.assert_icommand("irm -n 0 "+self.testfile, 'STDOUT_SINGLELINE','deprecated') # remove original from cacheResc only
self.admin.assert_icommand("ils -L "+self.testfile,'STDOUT_SINGLELINE',["2 "+self.testresc,self.testfile]) # replica 2 should still be there
self.admin.assert_icommand_fail("ils -L "+self.testfile,'STDOUT_SINGLELINE',["0 "+self.admin.default_resource,self.testfile]) # replica 0 should be gone
trashpath = self.admin.session_collection_trash
self.admin.assert_icommand_fail("ils -L "+trashpath+"/"+self.testfile,'STDOUT_SINGLELINE',["0 "+self.admin.default_resource,self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("NOTSURE / FIXME ... -K not supported, perhaps")
def test_local_iput_checksum(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
@unittest.skip("No Vault for S3 archive resource")
def test_iput_overwrite_others_file__ticket_2086(self):
pass
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = lib.create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) #
self.admin.assert_icommand("iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," "+filename]) # default resource cache should have dirty copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," "+filename]) # default resource archive should have dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," "+doublesize+" "," "+filename]) # default resource cache should not have doublesize file
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," "+doublesize+" "," "+filename]) # default resource archive should not have doublesize file
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = lib.create_local_testfile(filename)
hostname = lib.get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
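# Note on reading the ils -L checks below: the '&' flag in the listing marks a
# replica as up to date ("clean"); a replica listed without '&' is stale ("dirty").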
self.admin.assert_icommand("iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, 'STDOUT_SINGLELINE', "Creating") # create third resource
self.admin.assert_icommand("iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, 'STDOUT_SINGLELINE', "Creating") # create fourth resource
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("irepl -R thirdresc "+filename) # replicate to third resource
self.admin.assert_icommand("irepl -R fourthresc "+filename) # replicate to fourth resource
self.admin.assert_icommand("iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand(['irepl', filename]) # update replica on default resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand("irepl -aU "+filename) # update all replicas
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("irm -f "+filename) # cleanup file
self.admin.assert_icommand("iadmin rmresc thirdresc") # remove third resource
self.admin.assert_icommand("iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = lib.create_local_testfile(filename)
# assertions
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput -R "+self.testresc+" "+filename) # put file
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("irepl "+filename) # replicate to default resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand(['irepl', filename], 'STDERR', 'SYS_NOT_ALLOWED') # replicate overtop default resource
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should not have a replica 3
self.admin.assert_icommand(['irepl', '-R', self.testresc, filename], 'STDERR', 'SYS_NOT_ALLOWED') # replicate overtop test resource
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = lib.create_local_testfile(filename)
hostname = lib.get_hostname()
# assertions
self.admin.assert_icommand("iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, 'STDOUT_SINGLELINE', "Creating") # create third resource
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("irepl -R thirdresc "+filename) # replicate to third resource
self.admin.assert_icommand(['irepl', filename], 'STDERR', 'SYS_NOT_ALLOWED') # replicate overtop default resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand(['irepl', '-R', self.testresc, filename], 'STDERR', 'SYS_NOT_ALLOWED') # replicate overtop test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand(['irepl', '-R', 'thirdresc', filename], 'STDERR', 'SYS_NOT_ALLOWED') # replicate overtop third resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should not have a replica 4
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 5 "," & "+filename]) # should not have a replica 5
self.admin.assert_icommand("irm -f "+filename) # cleanup file
self.admin.assert_icommand("iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = lib.create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
self.admin.assert_icommand("ils -L " + filename, 'STDERR_SINGLELINE', "does not exist") # should not be listed
self.admin.assert_icommand("iput " + filename) # put file
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', filename) # for debugging
self.admin.assert_icommand("irepl -R " + self.testresc + " " + filename) # replicate to test resource
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', filename) # for debugging
# overwrite default repl with different data
self.admin.assert_icommand("iput -f %s %s" % (doublefile, filename))
# default resource cache should have clean copy
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', [" 0 ", " & " + filename])
# default resource cache should have new double clean copy
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', [" 0 ", " " + doublesize + " ", " & " + filename])
# default resource archive should have clean copy
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', [" 1 ", " & " + filename])
# default resource archive should have new double clean copy
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', [" 1 ", " " + doublesize + " ", " & " + filename])
# test resource should not have doublesize file
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE',
[" 2 " + self.testresc, " " + doublesize + " ", " " + filename])
# replicate back onto test resource
self.admin.assert_icommand("irepl -R " + self.testresc + " " + filename)
# test resource should have new clean doublesize file
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE',
[" 2 " + self.testresc, " " + doublesize + " ", " & " + filename])
# should not have a replica 3
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', [" 3 ", " & " + filename])
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
with open(filepath, 'wt') as f:
print("TESTFILE -- [" + filepath + "]", file=f, end='')
try:
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', filename) # should not be listed
self.admin.assert_icommand("iput --purgec " + filename) # put file
# should not be listed (trimmed)
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', [" 0 ", filename])
# should be listed once - replica 1
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', [" 1 ", filename])
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', [" 2 ", filename]) # should be listed only once
self.admin.assert_icommand(['irm', '-f', filename])
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', filename) # should not be listed
self.admin.assert_icommand(['iput', '-b', '--purgec', filename]) # put file... in bulk!
# should not be listed (trimmed)
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', [" 0 ", filename])
# should be listed once - replica 1
self.admin.assert_icommand("ils -L " + filename, 'STDOUT_SINGLELINE', [" 1 ", filename])
self.admin.assert_icommand_fail("ils -L " + filename, 'STDOUT_SINGLELINE', [" 2 ", filename]) # should be listed only once
finally:
if os.path.exists(filepath):
os.unlink(filepath)
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath, 'wt')
f.write("TESTFILE -- [" + filepath + "]")
f.close()
# assertions
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("iget -f --purgec "+filename) # get file and purge 'cached' replica
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 ",filename]) # should not be listed (trimmed)
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 ",filename]) # should be listed once
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 ",filename]) # should not be listed
# local cleanup
os.remove(filepath)
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath, 'wt')
f.write("TESTFILE -- [" + filepath + "]")
f.close()
# assertions
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R " + self.testresc + " --purgec " + filename) # replicate to test resource
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 ",filename]) # should not be listed (trimmed)
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 ",filename]) # should be listed twice - 2 of 3
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 ",filename]) # should be listed twice - 1 of 3
# local cleanup
os.remove(filepath)
def test_decoupled_naming_policy(self):
if self.archive_naming_policy != 'decoupled':
self.skipTest("Archive naming policy is not set to 'decoupled'")
# local setup
filename = self.testfile
# run as regular user
session = self.user0
collection = session.session_collection
# iquest to get the object id of the replica on the S3 archive
id_query = ( "select DATA_ID where COLL_NAME =" + "'" + collection + "'" +
" and DATA_NAME =" + "'" + filename + "'" +
" and DATA_REPL_NUM ='1'" )
# iquest to get the physical path of the replica on the S3 archive
path_query = ( "select DATA_PATH where COLL_NAME =" + "'" + collection + "'" +
" and DATA_NAME =" + "'" + filename + "'" +
" and DATA_REPL_NUM ='1'" )
# assertions
session.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
session.assert_icommand("iput "+filename) # put file
# get object id
object_id = session.run_icommand('iquest "%s" ' + '"' + id_query + '"')[0].strip()
# physical path we expect to see: /{bucket_name}/{reversed_id}/{obj_name}
target_path = '/' + self.s3bucketname + '/' + object_id[::-1] + '/' + filename
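# e.g. (hypothetical values) an object id of '10042' in bucket 'irods-bucket'
# for 'testfile.txt' would give '/irods-bucket/24001/testfile.txt'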
# get object path
physical_path = session.run_icommand('iquest "%s" ' + '"' + path_query + '"')[0].strip()
# verify object path
self.assertEqual(target_path, physical_path)
# move the file
new_filename = "%s.new" % filename
session.assert_icommand("imv %s %s" % (filename, new_filename))
# get and purge cache replica
session.assert_icommand("iget -f --purgec %s" % new_filename) # get file and purge 'cached' replica
# get again now that it is not in cache
session.assert_icommand("iget -f %s" % new_filename) # get file
# cleanup
session.run_icommand('irm -f ' + new_filename)
def test_decoupled_naming_policy_issue1855(self):
if self.archive_naming_policy != 'decoupled':
self.skipTest("Archive naming policy is not set to 'decoupled'")
# local setup
filename = self.testfile
# run as regular user
session = self.user0
collection = session.session_collection
# modify the s3 archive resource so that it only has the bucket name in the context
self.admin.assert_icommand('iadmin modresc archiveResc path /%s' % self.s3bucketname, 'STDOUT_SINGLELINE', 'Previous resource path:')
# iquest to get the object id of the replica on the S3 archive
id_query = ( "select DATA_ID where COLL_NAME =" + "'" + collection + "'" +
" and DATA_NAME =" + "'" + filename + "'" +
" and DATA_REPL_NUM ='1'" )
# iquest to get the physical path of the replica on the S3 archive
path_query = ( "select DATA_PATH where COLL_NAME =" + "'" + collection + "'" +
" and DATA_NAME =" + "'" + filename + "'" +
" and DATA_REPL_NUM ='1'" )
# assertions
session.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
session.assert_icommand("iput "+filename) # put file
# get object id
object_id = session.run_icommand('iquest "%s" ' + '"' + id_query + '"')[0].strip()
# physical path we expect to see: /{bucket_name}/{reversed_id}/{obj_name}
target_path = '/' + self.s3bucketname + '/' + object_id[::-1] + '/' + filename
# get object path
physical_path = session.run_icommand('iquest "%s" ' + '"' + path_query + '"')[0].strip()
# verify object path
self.assertEqual(target_path, physical_path)
# move the file
new_filename = "%s.new" % filename
session.assert_icommand("imv %s %s" % (filename, new_filename))
# get and purge cache replica
session.assert_icommand("iget -f --purgec %s" % new_filename) # get file and purge 'cached' replica
# get again now that it is not in cache
session.assert_icommand("iget -f %s" % new_filename) # get file
# cleanup
session.run_icommand('irm -f ' + new_filename)
@unittest.skip("skip until minio added to CI")
def test_multiple_s3_endpoints_replication_issue1858(self):
# local setup
filename = self.testfile
# run as regular user
session = self.user0
collection = session.session_collection
# set up resources
# TODO change these as necessary
minio_auth_file = '/var/lib/irods/s3.keypair'
minio_bucket_name = 'irods-bucket'
hostname = lib.get_hostname()
s3params_aws = 'S3_RETRY_COUNT=1;S3_WAIT_TIME_SEC=1;S3_PROTO=%s;S3_MPU_CHUNK=10;S3_MPU_THREADS=4;S3_ENABLE_MD5=1' % self.proto
s3params_aws += ';S3_DEFAULT_HOSTNAME=%s' % self.s3endPoint
s3params_aws += ';S3_AUTH_FILE=%s' % self.keypairfile
s3params_aws += ';S3_REGIONNAME=%s' % self.s3region
s3params_aws += ';ARCHIVE_NAMING_POLICY=%s' % self.archive_naming_policy
s3params_minio = 'S3_RETRY_COUNT=1;S3_WAIT_TIME_SEC=1;S3_PROTO=%s;S3_MPU_CHUNK=10;S3_MPU_THREADS=4;S3_ENABLE_MD5=1' % self.proto
s3params_minio += ';S3_DEFAULT_HOSTNAME=%s:9000' % hostname
s3params_minio += ';S3_AUTH_FILE=%s' % minio_auth_file
s3params_minio += ';S3_REGIONNAME=%s' % self.s3region
s3params_minio += ';ARCHIVE_NAMING_POLICY=%s' % self.archive_naming_policy
try:
# make resource tree with repl and two compound resources underneath
self.admin.assert_icommand('iadmin mkresc s3repl_1858 replication', 'STDOUT_SINGLELINE', 'Creating')
self.admin.assert_icommand('iadmin mkresc s3compound1_1858 compound', 'STDOUT_SINGLELINE', 'Creating')
self.admin.assert_icommand('iadmin mkresc s3compound2_1858 compound', 'STDOUT_SINGLELINE', 'Creating')
self.admin.assert_icommand('iadmin mkresc s3cache1_1858 unixfilesystem %s:/tmp/s3cache1_1858' % hostname, 'STDOUT_SINGLELINE', 'Creating')
self.admin.assert_icommand('iadmin mkresc s3archive1_1858 s3 %s:/%s/irods/Vault %s' % (hostname, self.s3bucketname, s3params_aws), 'STDOUT_SINGLELINE', 's3archive1_1858')
self.admin.assert_icommand('iadmin mkresc s3cache2_1858 unixfilesystem %s:/tmp/s3cache2_1858' % hostname, 'STDOUT_SINGLELINE', 'Creating')
self.admin.assert_icommand('iadmin mkresc s3archive2_1858 s3 %s:/%s/irods/s3archive2_1858_vault %s' % (hostname, minio_bucket_name, s3params_minio), 'STDOUT_SINGLELINE', 's3archive2_1858')
self.admin.assert_icommand('iadmin addchildtoresc s3repl_1858 s3compound1_1858')
self.admin.assert_icommand('iadmin addchildtoresc s3repl_1858 s3compound2_1858')
self.admin.assert_icommand('iadmin addchildtoresc s3compound1_1858 s3cache1_1858 cache')
self.admin.assert_icommand('iadmin addchildtoresc s3compound1_1858 s3archive1_1858 archive')
self.admin.assert_icommand('iadmin addchildtoresc s3compound2_1858 s3cache2_1858 cache')
self.admin.assert_icommand('iadmin addchildtoresc s3compound2_1858 s3archive2_1858 archive')
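# Resulting resource tree (sketch of the hierarchy built above):
#   s3repl_1858 (replication)
#     s3compound1_1858 (compound) -> s3cache1_1858 (cache) + s3archive1_1858 (archive, AWS endpoint)
#     s3compound2_1858 (compound) -> s3cache2_1858 (cache) + s3archive2_1858 (archive, minio endpoint)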
# put a file to this tree
session.assert_icommand('iput -R s3repl_1858 %s' % filename) # put file
# make sure we have four replicas
session.assert_icommand('ils -L %s' % filename, 'STDOUT_MULTILINE', ['s3repl_1858;s3compound1_1858;s3cache1_1858',
's3repl_1858;s3compound1_1858;s3archive1_1858',
's3repl_1858;s3compound2_1858;s3cache2_1858',
's3repl_1858;s3compound2_1858;s3archive2_1858'])
finally:
# remove the file
session.assert_icommand('irm -f %s' % filename) # remove file
# cleanup
self.admin.assert_icommand('iadmin rmchildfromresc s3repl_1858 s3compound1_1858')
self.admin.assert_icommand('iadmin rmchildfromresc s3repl_1858 s3compound2_1858')
self.admin.assert_icommand('iadmin rmchildfromresc s3compound1_1858 s3cache1_1858 cache')
self.admin.assert_icommand('iadmin rmchildfromresc s3compound1_1858 s3archive1_1858 archive')
self.admin.assert_icommand('iadmin rmchildfromresc s3compound2_1858 s3cache2_1858 cache')
self.admin.assert_icommand('iadmin rmchildfromresc s3compound2_1858 s3archive2_1858 archive')
self.admin.assert_icommand('iadmin rmresc s3repl_1858')
self.admin.assert_icommand('iadmin rmresc s3compound1_1858')
self.admin.assert_icommand('iadmin rmresc s3compound2_1858')
self.admin.assert_icommand('iadmin rmresc s3cache1_1858')
self.admin.assert_icommand('iadmin rmresc s3archive1_1858')
self.admin.assert_icommand('iadmin rmresc s3cache2_1858')
self.admin.assert_icommand('iadmin rmresc s3archive2_1858')
|
py | b406b791acc87a9f8899bd2e7bae00d0b568bcec |
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA, update_statusbar
class AnacondaNextLintError(sublime_plugin.WindowCommand):
"""Jump to the next lint error on the page
"""
def run(self) -> None:
self.jump(self._harvest_next())
update_statusbar(self.window.active_view())
def is_enabled(self) -> bool:
"""Determines if the command is enabled
"""
view = self.window.active_view()
if (view.file_name() in ANACONDA['DISABLED']
or not get_settings(view, 'anaconda_linting')):
return False
location = view.sel()[0].begin()
for lang in valid_languages():
matcher = 'source.{}'.format(lang)
if view.match_selector(location, matcher) is True:
return True
return False
def jump(self, lineno: int = None) -> None:
"""Jump to a line in the view buffer
"""
if lineno is None:
sublime.status_message('No lint errors')
return
pt = self.window.active_view().text_point(lineno, 0)
self.window.active_view().sel().clear()
self.window.active_view().sel().add(sublime.Region(pt))
self.window.active_view().show_at_center(pt)
def _harvest_next(self) -> int:
"""Harvest the next error that we find and return it back
"""
(cur_line, cur_col) = self.window.active_view().rowcol(
self.window.active_view().sel()[0].begin()
)
lines = set([])
vid = self.window.active_view().id()
for error_type in ['ERRORS', 'WARNINGS', 'VIOLATIONS']:
for line, _ in ANACONDA[error_type].get(vid, {}).items():
lines.add(int(line))
lines = sorted(lines)
if not len(lines):
return None
if cur_line is not None and lines[-1] > cur_line:
lines = [l for l in lines if l > cur_line]
return lines[0] if len(lines) > 0 else None
|
py | b406b9cebe10459aedb1b433ffd9eff0ece9a2ee | # -*- coding: utf-8 -*-
# Copyright (2017-2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import json
# 3rd party libs
from flask_api import status
from hpOneView.exceptions import HPOneViewException
from hpOneView.resources.servers.server_profiles import ServerProfiles
from hpOneView.resources.servers.server_hardware import ServerHardware
# Module libs
from oneview_redfish_toolkit.blueprints import network_interface_collection
from oneview_redfish_toolkit.tests.base_flask_test import BaseFlaskTest
class TestNetworkInterfaceCollection(BaseFlaskTest):
"""Tests for NetworkInterfaceCollection blueprint"""
@classmethod
def setUpClass(self):
super(TestNetworkInterfaceCollection, self).setUpClass()
self.app.register_blueprint(
network_interface_collection.network_interface_collection)
with open(
'oneview_redfish_toolkit/mockups/oneview/ServerProfile.json'
) as f:
self.server_profile = json.load(f)
with open(
'oneview_redfish_toolkit/mockups/oneview/ServerHardware.json'
) as f:
self.server_hardware = json.load(f)
def test_get_network_interface_collection(self):
"""Tests NetworkInterfaceCollection"""
with open(
'oneview_redfish_toolkit/mockups/redfish/'
'NetworkInterfaceCollection.json'
) as f:
network_interface_collection_mockup = json.load(f)
profile_obj = ServerProfiles(self.oneview_client, self.server_profile)
serverhw_obj = ServerHardware(
self.oneview_client, self.server_hardware)
self.oneview_client.\
server_profiles.get_by_id.return_value = profile_obj
self.oneview_client.server_hardware.get_by_uri.return_value = \
serverhw_obj
response = self.client.get(
"/redfish/v1/Systems/b425802b-a6a5-4941-8885-aab68dfa2ee2/"
"NetworkInterfaces/"
)
result = json.loads(response.data.decode("utf-8"))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual("application/json", response.mimetype)
self.assertEqualMockup(network_interface_collection_mockup, result)
self.oneview_client.server_profiles.get_by_id.assert_called_with(
self.server_profile["uuid"])
self.oneview_client.server_hardware.get_by_uri.assert_called_with(
self.server_profile["serverHardwareUri"])
def test_get_network_interface_collection_when_profile_not_found(
self):
"""Tests when the searching a server profile returns not found"""
e = HPOneViewException({
'errorCode': 'RESOURCE_NOT_FOUND',
'message': 'server-hardware not found',
})
self.oneview_client.server_profiles.get_by_id.side_effect = e
response = self.client.get(
"/redfish/v1/Systems/b425802b-a6a5-4941-8885-aab68dfa2ee2/"
"NetworkInterfaces/"
)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
self.assertEqual("application/json", response.mimetype)
self.oneview_client.server_profiles.get_by_id.assert_called_with(
self.server_profile["uuid"])
self.oneview_client.server_hardware.get_by_uri.assert_not_called()
def test_get_network_interface_collection_when_server_hardware_not_found(
self):
"""Tests when the searching a server hardware returns not found"""
e = HPOneViewException({
'errorCode': 'RESOURCE_NOT_FOUND',
'message': 'server-hardware not found',
})
self.oneview_client.server_profiles.get_by_id.side_effect = e
response = self.client.get(
"/redfish/v1/Systems/b425802b-a6a5-4941-8885-aab68dfa2ee2/"
"NetworkInterfaces/"
)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
self.assertEqual("application/json", response.mimetype)
self.oneview_client.server_profiles.get_by_id.assert_called_with(
self.server_profile["uuid"])
self.oneview_client.server_hardware.get_by_uri.assert_not_called()
def test_get_network_interface_collection_when_profile_raise_any_exception(
self):
"""Tests when the searching a server profile raises any exception"""
e = HPOneViewException({
'errorCode': 'ANOTHER_ERROR',
'message': 'server-hardware-types error',
})
self.oneview_client.server_profiles.get_by_id.side_effect = e
response = self.client.get(
"/redfish/v1/Systems/b425802b-a6a5-4941-8885-aab68dfa2ee2/"
"NetworkInterfaces/"
)
self.assertEqual(
status.HTTP_500_INTERNAL_SERVER_ERROR,
response.status_code
)
self.assertEqual("application/json", response.mimetype)
self.oneview_client.server_profiles.get_by_id.assert_called_with(
self.server_profile["uuid"])
self.oneview_client.server_hardware.get_by_uri.assert_not_called()
|
py | b406ba187b7a47c7e839b8c094b89ac20ff397f2 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from gaiatest import GaiaTestCase
from gaiatest.apps.messages.app import Messages
class TestSmsWithPictureAttached(GaiaTestCase):
_text_message_content = 'Automated Test %s' % str(time.time())
def setUp(self):
GaiaTestCase.setUp(self)
# connect to mobile data
self.data_layer.connect_to_cell_data()
# add photo to storage
self.push_resource('IMG_0001.jpg')
def test_sms_cropped_picture(self):
"""
https://moztrap.mozilla.org/manage/case/10742/
"""
# launch the app
messages = Messages(self.marionette)
messages.launch()
# click new message
new_message = messages.tap_create_new_message()
# type phone number
new_message.type_phone_number(self.testvars['local_phone_numbers'][0])
# type text message
new_message.type_message(self._text_message_content)
# add attachment
activities_list = new_message.tap_attachment()
# select gallery
gallery = activities_list.tap_gallery()
# go through the crop process
gallery.wait_for_thumbnails_to_load()
gallery.thumbnails[0].tap()
from gaiatest.apps.gallery.regions.crop_view import CropView
crop_view = CropView(self.marionette)
# can't actually crop the element
crop_view.tap_crop_done()
# back to messages app frame
new_message.wait_for_resizing_to_finish()
# Tap on attachment
attachment_options = new_message.tap_image_attachment()
view_image = attachment_options.tap_view_button()
# Check that the attached image is displayed
self.assertTrue(view_image.is_image_visible)
view_image.tap_back_button()
attachment_options.tap_cancel()
# click send
self.message_thread = new_message.tap_send(timeout=300)
self.message_thread.wait_for_received_messages(timeout=300)
# get the most recent listed and most recent received text message
last_received_message = self.message_thread.received_messages[-1]
last_message = self.message_thread.all_messages[-1]
# Check the most recent received message has the same text content
self.assertEqual(self._text_message_content, last_received_message.text.strip('\n').strip())
# Check that most recent message is also the most recent received message
self.assertEqual(last_received_message.id, last_message.id)
# Check that message has attachments
self.assertTrue(last_message.has_attachments)
# Tap on the attachment
view_image = last_message.tap_attachment()
# Check that the attached image is displayed
self.assertTrue(view_image.is_image_visible)
|
py | b406ba893c105f4d3e8723cd91cd2297a7fd29a7 | from django.shortcuts import get_object_or_404, render, render_to_response
from ..cart.forms import CartAddProductForm
from .forms import ProductsSearchForm
from .models import Category, Product
def main(request):
context = {
'form': ProductsSearchForm(request.GET),
}
return render(request, 'shop/main.html', context)
def product_list(request, category_slug=None):
if category_slug is None:
category = None
products = Product.objects.filter(available=True)
else:
category = get_object_or_404(Category, slug=category_slug)
products = category.products
context = {
'category': category,
'categories': Category.objects.all(),
'products': products
}
return render(request, 'shop/product_list.html', context)
def product_detail(request, slug):
product = get_object_or_404(Product, slug=slug, available=True)
context = {
'product': product,
'cart_product_form': CartAddProductForm(product_id=product.id),
'images': [img.image for img in product.images.all()]
}
return render(request, 'shop/product_detail.html', context)
def product_list_by_manufacturer(request):
pass
def products(request):
form = ProductsSearchForm(request.GET)
context = {
'form': form,
'products': form.search()
}
return render_to_response('search/search.html', context)
|
py | b406bab862bb73374f9ac0ebde8cd73cddcec3a7 | ########################################################################
# SwarmOps - Heuristic optimization for Python.
# Copyright (C) 2003-2016 Magnus Erik Hvass Pedersen.
# See the file README.md for instructions.
# See the file LICENSE.txt for license details.
# SwarmOps on the internet: http://www.Hvass-Labs.org/
########################################################################
########################################################################
# Particle Swarm Optimization (PSO).
#
# PSO is a heuristic optimizer that does not use the gradient of the problem
# being optimized. A so-called global-best variant of the PSO is implemented here.
# A simple PSO variant is also implemented here.
#
# Search-space boundaries are necessary for this PSO variant to work properly.
# So if your optimization problem does not have natural boundaries, you should
# simply choose some boundaries that are reasonable.
#
# PSO starts by creating a number of random trials called particles. In each
# iteration, these particles are moved around in the search-space using a
# formula that involves the particle's best-known position as well as the
# entire swarm's best-known position. This has been found to work well for
# optimizing many difficult problems, although a satisfactory solution is
# not guaranteed to be found.
#
# The PSO was originally proposed around year 1995, see [1] and [2]. In the
# following 20 years, thousands of PSO variants have been proposed.
# One of the early and basic variants of the PSO is implemented here.
# Newer PSO variants often claim to adapt the control parameters during
# optimization, thus making the PSO adapt better to new problems. But it
# was found in [3] that the basic PSO could perform just as well if using
# proper control parameters. Control parameters tuned for different
# optimization scenarios are given in [4] and included in this file below.
#
# References:
#
# [1] J. Kennedy, R.C. Eberhart. Particle Swarm Optimization. Proceedings of
# IEEE International Conference on Neural Networks. pp. 1942-1948. 1995.
#
# [2] Y. Shi, R.C. Eberhart. A modified particle swarm optimizer. Proceedings
# of IEEE International Conference on Evolutionary Computation. pp. 69-73. 1998.
#
# [3] M.E.H. Pedersen. Tuning & Simplifying Heuristical Optimization (PhD thesis).
# University of Southampton, School of Engineering Sciences. 2010
# http://www.hvass-labs.org/people/magnus/thesis/pedersen08thesis.pdf
#
# [4] M.E.H. Pedersen. Good parameters for particle swarm optimization.
# Technical Report HL-1001, Hvass Laboratories. 2010.
# http://www.hvass-labs.org/people/magnus/publications/pedersen10good-pso.pdf
#
########################################################################
import numpy as np
from swarmops.Optimize import SingleRun
from swarmops import tools
##################################################
class Base(SingleRun):
def __init__(self, problem, parallel=False, *args, **kwargs):
"""
Create object instance and perform a single optimization run using PSO.
:param problem: The problem to be optimized. Instance of Problem-class.
:param parallel:
Evaluate the fitness for the particles in parallel.
See the README.md file for a discussion on this.
:return:
Object instance. Get the optimization results from the object's variables.
- best is the best-found solution.
- best_fitness is the associated fitness of the best-found solution.
- fitness_trace is an instance of the FitnessTrace-class.
"""
# Copy arguments to instance variables.
self.problem = problem
self.parallel = parallel
# Initialize all particles with random positions in the search-space.
# The first index is for the particle number.
# The second index is for the search-space dimension.
# Note that self.num_particles must be set prior to this by the sub-class.
self.particle = tools.rand_population(lower=problem.lower_init,
upper=problem.upper_init,
num_agents=self.num_particles,
dim=problem.dim)
# Initialize best-known positions for the particles to their starting positions.
# A copy is made because the particle positions will change during optimization
# regardless of improvement to the particle's fitness.
self.particle_best = np.copy(self.particle)
# Initialize fitness of best-known particle positions to infinity.
self.particle_best_fitness = np.repeat(np.inf, self.num_particles)
# Boundaries for the velocity. These are set to the range of the search-space.
bound_range = np.abs(problem.upper_bound - problem.lower_bound)
self.velocity_lower_bound = -bound_range
self.velocity_upper_bound = bound_range
# Initialize all velocities with random values in the allowed range.
self.velocity = tools.rand_population(lower=self.velocity_lower_bound,
upper=self.velocity_upper_bound,
num_agents=self.num_particles,
dim=problem.dim)
# Initialize parent-class which also starts the optimization run.
SingleRun.__init__(self, *args, **kwargs)
def _optimize(self):
"""
Perform a single optimization run.
This function is called by the parent-class.
"""
# Calculate fitness for the initial particle positions.
self._update_fitness()
# Optimization iterations.
# The counting starts with num_particles because the fitness has
# already been calculated once for each particle during initialization.
for i in range(self.num_particles, self.max_evaluations, self.num_particles):
# Update the particle velocities and positions.
self._update_particles()
# Update the fitness for each particle.
self._update_fitness()
# Call parent-class to print status etc. during optimization.
self._iteration(i)
def _fitness(self, i):
"""
Calculate the fitness for the i'th particle.
"""
return self.problem.fitness(self.particle[i, :], limit=self.particle_best_fitness[i])
def _update_fitness(self):
"""
Calculate and update the fitness for each particle. Also updates the particle's
and swarm's best-known fitness and position if an improvement is found.
"""
if not self.parallel:
# Calculate the fitness for each particle. Not parallel.
new_fitness = [self._fitness(i) for i in range(self.num_particles)]
else:
import multiprocessing as mp
# Create a pool of workers sized according to the CPU cores available.
pool = mp.Pool()
# Calculate the fitness for each particle in parallel.
new_fitness = pool.map(self._fitness, range(self.num_particles))
# Close the pool of workers and wait for them all to finish.
pool.close()
pool.join()
# For each particle.
for i in range(self.num_particles):
# If the fitness is an improvement over the particle's best-known fitness.
if new_fitness[i] < self.particle_best_fitness[i]:
# Update the particle's best-known fitness and position.
self.particle_best_fitness[i] = new_fitness[i]
self.particle_best[i, :] = self.particle[i, :]
# Update the entire swarm's best-known fitness and position if an improvement.
# The parent-class is used for this.
self._update_best(fitness=new_fitness[i],
x=self.particle_best[i, :])
##################################################
class PSO(Base):
"""
Perform a single optimization run using Particle Swarm Optimization (PSO).
This is a so-called global-best variant, although it may have slightly
different features than other global-best variants in the research literature.
In practice, you would typically perform multiple optimization runs using
the MultiRun-class. The reason is that PSO is a heuristic optimizer so
there is no guarantee that an acceptable solution is found in any single
run. It is more likely that an acceptable solution is found if you perform
multiple optimization runs.
Control parameters have been tuned for different optimization scenarios.
First try and use the default parameters. If that does not give
satisfactory results, then you may try some of the following.
Select the parameters that most closely match your problem.
For example, if you want to optimize a problem where the search-space
has 15 dimensions and you can perform 30000 evaluations, then you could
first try using parameters_20dim_40000eval. If that does not give
satisfactory results then you could try using parameters_10dim_20000eval.
If that does not work then you will either need to meta-optimize the
parameters for the problem at hand, or you should try using another optimizer.
"""
# Name of this optimizer.
name = "PSO"
name_full = "Particle Swarm Optimization (Global-Best Variant)"
# Number of control parameters for PSO. Used by MetaFitness-class.
num_parameters = 4
# Lower boundaries for the control parameters of PSO. Used by MetaFitness-class.
parameters_lower_bound = [1.0, -2.0, -4.0, -4.0]
# Upper boundaries for the control parameters of PSO. Used by MetaFitness-class.
parameters_upper_bound = [300.0, 2.0, 4.0, 6.0]
@staticmethod
def parameters_list(num_particles, omega, phi_p, phi_g):
"""
Create a list with PSO parameters in the correct order.
:param num_particles: Number of particles for the PSO swarm.
:param omega: The omega parameter (aka. inertia weight) for the PSO.
:param phi_p: The phi_p parameter (aka. particle weight) for the PSO.
:param phi_g: The phi_g parameter (aka. social weight) for the PSO.
:return: List with PSO parameters.
"""
return [num_particles, omega, phi_p, phi_g]
@staticmethod
def parameters_dict(parameters):
"""
Create and return a dict from a list of PSO parameters.
This is useful for printing the named parameters.
:param parameters: List with PSO parameters assumed to be in the correct order.
:return: Dict with PSO parameters.
"""
return {'num_particles': parameters[0],
'omega': parameters[1],
'phi_p': parameters[2],
'phi_g': parameters[3]}
# Default parameters for the PSO which will be used if no other parameters are specified.
# These are a compromise of the tuned parameters below. Try this first and see if it works.
parameters_default = [50.0, -0.4, -0.3, 3.9]
# Parameters tuned by hand. These are common in the older research literature on PSO
# but perform much worse than meta-optimized parameters, especially for this PSO variant.
parameters_hand_tuned = [50.0, 0.729, 1.49445, 1.49445]
# Parameters tuned for benchmark problems in 2 dimensions using 400 fitness evaluations.
parameters_2dim_400eval_a = [25.0, 0.3925, 2.5586, 1.3358]
parameters_2dim_400eval_b = [29.0, -0.4349, -0.6504, 2.2073]
# Parameters tuned for benchmark problems in 2 dimensions using 4000 fitness evaluations.
parameters_2dim_4000eval_a = [156.0, 0.4091, 2.1304, 1.0575]
parameters_2dim_4000eval_b = [237.0, -0.2887, 0.4862, 2.5067]
# Parameters tuned for benchmark problems in 5 dimensions using 1000 fitness evaluations.
parameters_5dim_1000eval_a = [63.0, -0.3593, -0.7238, 2.0289]
parameters_5dim_1000eval_b = [47.0, -0.1832, 0.5287, 3.1913]
# Parameters tuned for benchmark problems in 5 dimensions using 10000 fitness evaluations.
parameters_5dim_10000eval_a = [223.0, -0.3699, -0.1207, 3.3657]
parameters_5dim_10000eval_b = [203.0, 0.5069, 2.5524, 1.0056]
# Parameters tuned for benchmark problems in 10 dimensions using 2000 fitness evaluations.
parameters_10dim_2000eval_a = [63.0, 0.6571, 1.6319, 0.6239]
parameters_10dim_2000eval_b = [204.0, -0.2134, -0.3344, 2.3259]
# Parameters tuned for benchmark problems in 10 dimensions using 20000 fitness evaluations.
parameters_10dim_20000eval = [53.0, -0.3488, -0.2746, 4.8976]
# Parameters tuned for benchmark problems in 20 dimensions using 40000 fitness evaluations.
parameters_20dim_40000eval = [69.0, -0.4438, -0.2699, 3.395]
# Parameters tuned for benchmark problems in 20 dimensions using 400000 fitness evaluations.
parameters_20dim_400000eval_a = [149.0, -0.3236, -0.1136, 3.9789]
parameters_20dim_400000eval_b = [60.0, -0.4736, -0.97, 3.7904]
parameters_20dim_400000eval_c = [256.0, -0.3499, -0.0513, 4.9087]
# Parameters tuned for benchmark problems in 30 dimensions using 60000 fitness evaluations.
parameters_30dim_60000eval = [134.0, -0.1618, 1.8903, 2.1225]
# Parameters tuned for benchmark problems in 30 dimensions using 600000 fitness evaluations.
parameters_30dim_600000eval = [95.0, -0.6031, -0.6485, 2.6475]
# Parameters tuned for benchmark problems in 50 dimensions using 100000 fitness evaluations.
parameters_50dim_100000eval = [106.0, -0.2256, -0.1564, 3.8876]
# Parameters tuned for benchmark problems in 100 dimensions using 200000 fitness evaluations.
parameters_100dim_200000eval = [161.0, -0.2089, -0.0787, 3.7637]
def __init__(self, parameters=parameters_default, *args, **kwargs):
"""
Create object instance and perform a single optimization run using PSO.
:param parameters:
Control parameters for the PSO.
These may have a significant impact on the optimization performance.
First try and use the default parameters and if they don't give satisfactory
results, then experiment with other parameters.
:return:
Object instance. Get the optimization results from the object's variables.
- best is the best-found solution.
- best_fitness is the associated fitness of the best-found solution.
- fitness_trace is an instance of the FitnessTrace-class.
"""
# Unpack control parameters.
self.num_particles, self.omega, self.phi_p, self.phi_g = parameters
# The number of particles must be an integer.
self.num_particles = int(self.num_particles)
# Initialize parent-class which also starts the optimization run.
Base.__init__(self, *args, **kwargs)
def _update_particles(self):
"""
Update the velocities and positions for all particles.
This does not update the fitness for each particle.
"""
# Random values between zero and one. One random value per particle.
rand_p = tools.rand_uniform(size=self.num_particles)
rand_g = tools.rand_uniform(size=self.num_particles)
# Update velocity for all particles using numpy operations.
# For an explanation of this formula, see the research papers referenced above.
# Note that self.best is the swarm's best-known position aka. global-best.
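# Written out per particle i and dimension d (this is what the transposed expression below computes):
#   v[i,d] = omega * v[i,d]
#            + phi_p * r_p[i] * (particle_best[i,d] - particle[i,d])
#            + phi_g * r_g[i] * (best[d] - particle[i,d])
# where r_p[i] and r_g[i] are the per-particle random numbers drawn above
# (shared across all dimensions of particle i in this variant).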
self.velocity = (self.omega * self.velocity.T \
+ self.phi_p * rand_p * (self.particle_best - self.particle).T \
+ self.phi_g * rand_g * (self.best - self.particle).T).T
# Fix de-normalized floating point values which can make the execution very slow.
self.velocity = tools.denormalize_trunc(self.velocity)
# Bound velocity.
self.velocity = tools.bound(self.velocity, self.velocity_lower_bound, self.velocity_upper_bound)
# Update particle positions in the search-space by adding the velocity.
self.particle = self.particle + self.velocity
# Bound particle position to search-space.
self.particle = tools.bound(self.particle, self.problem.lower_bound, self.problem.upper_bound)
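# Minimal usage sketch (hedged): the benchmark problem class and the
# `max_evaluations` keyword below are assumptions based on how `self.problem`
# and `self.max_evaluations` are referenced above, not verified against the
# rest of the package.
#
#   from swarmops.Problem import Sphere   # hypothetical import path
#   problem = Sphere(dim=10)              # any Problem instance with bounds and dim
#   result = PSO(problem=problem,
#                parameters=PSO.parameters_10dim_20000eval,
#                max_evaluations=20000)
#   print(result.best, result.best_fitness)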
##################################################
class MOL(Base):
"""
Perform a single optimization run using Many Optimizing Liaisons (MOL).
In practice, you would typically perform multiple optimization runs using
the MultiRun-class. The reason is that MOL is a heuristic optimizer so
there is no guarantee that an acceptable solution is found in any single
run. It is more likely that an acceptable solution is found if you perform
multiple optimization runs.
Control parameters have been tuned for different optimization scenarios.
First try and use the default parameters. If that does not give
satisfactory results, then you may try some of the following.
Select the parameters that most closely match your problem.
For example, if you want to optimize a problem where the search-space
has 15 dimensions and you can perform 30000 evaluations, then you could
first try using parameters_20dim_40000eval. If that does not give
satisfactory results then you could try using parameters_10dim_20000eval.
If that does not work then you will either need to meta-optimize the
parameters for the problem at hand, or you should try using another optimizer.
"""
# Name of this optimizer.
name = "MOL"
name_full = "Many Optimizing Liaisons (Simple Variant of PSO)"
# Number of control parameters for MOL. Used by MetaFitness-class.
num_parameters = 3
# Lower boundaries for the control parameters of MOL. Used by MetaFitness-class.
parameters_lower_bound = [1.0, -2.0, -4.0]
# Upper boundaries for the control parameters of MOL. Used by MetaFitness-class.
parameters_upper_bound = [300.0, 2.0, 6.0]
@staticmethod
def parameters_dict(parameters):
"""
Create and return a dict from a list of MOL parameters.
This is useful for printing the named parameters.
:param parameters: List with MOL parameters assumed to be in the correct order.
:return: Dict with MOL parameters.
"""
return {'num_particles': parameters[0],
'omega': parameters[1],
'phi_g': parameters[2]}
@staticmethod
def parameters_list(num_particles, omega, phi_g):
"""
Create a list with MOL parameters in the correct order.
:param num_particles: Number of particles for the MOL swarm.
:param omega: The omega parameter (aka. inertia weight) for the MOL.
:param phi_g: The phi_g parameter (aka. social weight) for the MOL.
:return: List with MOL parameters.
"""
return [num_particles, omega, phi_g]
# Default parameters for MOL which will be used if no other parameters are specified.
# These are a compromise of the tuned parameters below. Try this first and see if it works.
parameters_default = [100.0, -0.35, 3.0]
# Parameters tuned for benchmark problems in 2 dimensions using 400 fitness evaluations.
parameters_2dim_400eval_a = [23.0, -0.3328, 2.8446]
parameters_2dim_400eval_b = [50.0, 0.2840, 1.9466]
# Parameters tuned for benchmark problems in 2 dimensions using 4000 fitness evaluations.
parameters_2dim_4000eval_a = [183.0, -0.2797, 3.0539]
parameters_2dim_4000eval_b = [139.0, 0.6372, 1.0949]
# Parameters tuned for benchmark problems in 5 dimensions using 1000 fitness evaluations.
parameters_5dim_1000eval = [50.0, -0.3085, 2.0273]
# Parameters tuned for benchmark problems in 5 dimensions using 10000 fitness evaluations.
parameters_5dim_10000eval = [96.0, -0.3675, 4.1710]
# Parameters tuned for benchmark problems in 10 dimensions using 2000 fitness evaluations.
parameters_10dim_2000eval = [60.0, -0.2700, 2.9708]
# Parameters tuned for benchmark problems in 10 dimensions using 20000 fitness evaluations.
parameters_10dim_20000eval = [116.0, -0.3518, 3.8304]
# Parameters tuned for benchmark problems in 20 dimensions using 40000 fitness evaluations.
parameters_20dim_40000eval = [228.0, -0.3747, 4.2373]
# Parameters tuned for benchmark problems in 20 dimensions using 400000 fitness evaluations.
parameters_20dim_400000eval = [125.0, -0.2575, 4.6713]
# Parameters tuned for benchmark problems in 30 dimensions using 60000 fitness evaluations.
parameters_30dim_60000eval = [198.0, -0.2723, 3.8283]
# Parameters tuned for benchmark problems in 50 dimensions using 100000 fitness evaluations.
parameters_50dim_100000eval = [290.0, -0.3067, 3.6223]
# Parameters tuned for benchmark problems in 100 dimensions using 200000 fitness evaluations.
parameters_100dim_200000eval = [219.0, -0.1685, 3.9162]
def __init__(self, parameters=parameters_default, *args, **kwargs):
"""
Create object instance and perform a single optimization run using MOL.
:param problem: The problem to be optimized. Instance of Problem-class.
:param parameters:
Control parameters for the MOL.
These may have a significant impact on the optimization performance.
First try and use the default parameters and if they don't give satisfactory
results, then experiment with other parameters.
:return:
Object instance. Get the optimization results from the object's variables.
- best is the best-found solution.
- best_fitness is the associated fitness of the best-found solution.
- fitness_trace is an instance of the FitnessTrace-class.
"""
# Unpack control parameters.
self.num_particles, self.omega, self.phi_g = parameters
# The number of particles must be an integer.
self.num_particles = int(self.num_particles)
# Initialize parent-class which also starts the optimization run.
Base.__init__(self, *args, **kwargs)
def _update_particles(self):
"""
Update the velocities and positions for all particles.
This does not update the fitness for each particle.
"""
# Random values between zero and one. One random value per particle.
rand_g = tools.rand_uniform(size=self.num_particles)
# Update velocity for all particles using numpy operations.
# For an explanation of this formula, see the research papers referenced above.
# Note that self.best is the swarm's best-known position aka. global-best.
self.velocity = (self.omega * self.velocity.T \
+ self.phi_g * rand_g * (self.best - self.particle).T).T
# Fix de-normalized floating point values which can make the execution very slow.
self.velocity = tools.denormalize_trunc(self.velocity)
# Bound velocity.
self.velocity = tools.bound(self.velocity, self.velocity_lower_bound, self.velocity_upper_bound)
# Update particle positions in the search-space by adding the velocity.
self.particle = self.particle + self.velocity
# Bound particle position to search-space.
self.particle = tools.bound(self.particle, self.problem.lower_bound, self.problem.upper_bound)
##################################################
|
py | b406bbb665f2c087c0af3c92a5aec0bca85023c7 | # -*- coding: utf-8 -*-
"""Module providing controlpanels"""
import datetime
import json
import time
import six
from Products.Five import BrowserView
from Products.statusmessages.interfaces import IStatusMessage
from ade25.widgets.config import PKG_WIDGETS
from plone.app.registry.browser.controlpanel import RegistryEditForm
from plone.autoform import form
from plone.autoform import directives as form_directives
from zope import schema
from zope.interface import Interface
from plone.z3cform import layout
from plone.app.registry.browser.controlpanel import ControlPanelFormWrapper
from ade25.widgets import utils as widget_utils
from ade25.widgets import MessageFactory as _
class Ade25WidgetsSettings(BrowserView):
""" Ade25 settings overview """
def update(self):
if super(Ade25WidgetsSettings, self).update():
if 'form.button.setup' in self.request.form:
self.processSetup()
def processSetup(self):
IStatusMessage(self.request).addStatusMessage(
_(u'Setup initialized.'), 'info')
class IAde25WidgetsControlPanel(Interface):
content_widgets_header = schema.List(
title=_(u"Content Widgets Page Header"),
description=_(u"Select Content Widgets that should be available "
u"for the page header section."),
value_type=schema.Choice(
vocabulary='ade25.widgets.vocabularies.AvailableContentWidgets'
),
required=False
)
content_widgets_main = schema.List(
title=_(u"Content Widgets Main Content Area"),
description=_(u"Select Content Widgets that should be available "
u"for the main page content area."),
value_type=schema.Choice(
vocabulary='ade25.widgets.vocabularies.AvailableContentWidgets'
),
required=False
)
content_widgets_footer = schema.List(
title=_(u"Content Widgets Page Footer"),
description=_(u"Select Content Widgets that should be available "
u"for the page header section."),
value_type=schema.Choice(
vocabulary='ade25.widgets.vocabularies.AvailableContentWidgets'
),
required=False
)
widget_settings = schema.Text(
title=_(u"Widget Settings JSON"),
description=_(u"Widget configuration registry storing a string "
u"representation of a valid JSON settings array"),
required=False,
)
class Ade25WidgetsControlPanelForm(RegistryEditForm):
schema = IAde25WidgetsControlPanel
schema_prefix = "ade25.widgets"
label = u'Ade25 Widgets'
Ade25WidgetsSettingsBase = layout.wrap_form(
Ade25WidgetsControlPanelForm,
ControlPanelFormWrapper
)
class IAde25WidgetsControlPanelWidgets(Interface):
read_more_icon = schema.TextLine(
title=_(u"Read More Icon Name"),
description=_(u"Please enter icon to be used in read more links when "
u"a layout with icon is selected. Note: the icon needs to "
u"exist in the themes icon sprite for this to work."),
default=u'chevron',
required=False
)
form_directives.widget('listing_scale', klass='js-choices-selector')
listing_scale = schema.Choice(
title=_(u"Content Listing: Image Scale"),
vocabulary='ade25.widgets.vocabularies.AvailableImageScales',
default=u'ratio-4:3',
required=False
)
listing_hidden_fields = schema.List(
title=_(u"Content Listing: Hidden Elements"),
description=_(u"Please select which elements should be hidden in the "
u"widget add and edit forms."),
value_type=schema.Choice(
vocabulary='ade25.widgets.vocabularies.ContentWidgetSchemaOptions'
),
default=['text', 'link', ],
required=False
)
form_directives.widget('listing_cards_scale', klass='js-choices-selector')
listing_cards_scale = schema.Choice(
title=_(u"Content Listing Cards: Image Scale"),
vocabulary='ade25.widgets.vocabularies.AvailableImageScales',
default=u'ratio-4:3',
required=False
)
listing_cards_hidden_fields = schema.List(
title=_(u"Content Listing Cards: Hidden Elements"),
description=_(u"Please select which elements should not be available in the "
u"widget add and edit forms."),
value_type=schema.Choice(
vocabulary='ade25.widgets.vocabularies.ContentWidgetSchemaOptions'
),
default=['text', 'link', ],
required=False
)
form_directives.widget('image_cover_scale', klass='js-choices-selector')
image_cover_scale = schema.Choice(
title=_(u"Cover Image: Image Scale"),
vocabulary='ade25.widgets.vocabularies.AvailableImageScales',
default=u'ratio-4:3',
required=False
)
form_directives.widget('image_poster_scale', klass='js-choices-selector')
image_poster_scale = schema.Choice(
title=_(u"Poster Image: Image Scale"),
vocabulary='ade25.widgets.vocabularies.AvailableImageScales',
default=u'ratio-16:9',
required=False
)
image_poster_hidden_fields = schema.List(
title=_(u"Poster Image: Hidden Elements"),
description=_(u"Please select which elements should be available in the "
u"widget add and edit forms."),
value_type=schema.Choice(
vocabulary='ade25.widgets.vocabularies.ContentWidgetSchemaOptions'
),
default=['text', 'link', ],
required=False
)
class Ade25WidgetsControlPanelWidgetsForm(RegistryEditForm):
schema = IAde25WidgetsControlPanelWidgets
schema_prefix = "ade25.widgets"
label = u'Ade25 Widgets Settings'
Ade25WidgetsSettingsWidgets = layout.wrap_form(
Ade25WidgetsControlPanelWidgetsForm,
ControlPanelFormWrapper
)
class Ade25WidgetsSettingsJSON(BrowserView):
""" Ade25 settings json export """
def __call__(self):
return self.render()
@staticmethod
def _widget_configuration():
content_widgets = PKG_WIDGETS
return content_widgets
def render(self):
msg = _(u"JSON file could not be generated")
data = {
'success': False,
'message': msg
}
configuration = self._widget_configuration()
if configuration:
data = configuration
widgets = {
"items": data,
"timestamp": six.text_type(int(time.time())),
"updated": datetime.datetime.now().isoformat()
}
self.request.response.setHeader('Content-Type',
'application/json; charset=utf-8')
return json.dumps(widgets)
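# --- Hedged illustration (not part of the original view) ---
# The serialized payload returned above has roughly this shape; the contents of
# "items" come from the PKG_WIDGETS registry and are placeholders here:
#   {"items": {...}, "timestamp": "1700000000", "updated": "2023-11-14T12:00:00"}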
|
py | b406bc4edabce57df0c30d7f9e6db02db62ccd15 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implementation of Res2Net with extended modifications (Res2Net-Plus):
Improvements: 3x3 stem instead of 7x7, BN before activation, Mish activation instead of ReLU
this file: https://github.com/lessw2020/res2net-plus
all based on original paper and impl:
https://arxiv.org/abs/1904.01169v2
then based on https://github.com/gasvn/Res2Net
then based on:
https://github.com/frgfm/Holocron/blob/master/holocron/models/res2net.py
and finally:
https://github.com/lessw2020/res2net-plus
"""
import torch
import torch.nn as nn
from torchvision.models.resnet import conv1x1, conv3x3
from torchvision.models.utils import load_state_dict_from_url
from fastai.torch_core import *
import math
import sys
import torch.utils.model_zoo as model_zoo
from functools import partial
#from ...torch_core import Module
from fastai.torch_core import Module
import torch.nn.functional as F
class Mish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x *( torch.tanh(F.softplus(x)))
act_fn = Mish()
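# --- Hedged sanity-check sketch (not in the original file) ---
# Mish(x) = x * tanh(softplus(x)); Mish(0) == 0 and the function is smooth.
def _example_mish_values():
    x = torch.linspace(-3, 3, 7)
    return act_fn(x)  # same shape as x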
def conv(ni, nf, ks=3, stride=1, bias=False):
return nn.Conv1d(ni, nf, kernel_size=ks, stride=stride, padding=ks//2, bias=bias)
class Res2Block(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=4, dilation=1, scale=4, first_block=False, norm_layer=None):
"""Implements a residual block
Args:
inplanes (int): input channel dimensionality
planes (int): output channel dimensionality
stride (int): stride used for conv3x3
downsample (torch.nn.Module): module used for downsampling
groups: num of convolution groups
base_width: base width
dilation (int): dilation rate of conv3x3
scale (int): scaling ratio for cascade convs
first_block (bool): whether the block is the first to be placed in the conv layer
norm_layer (torch.nn.Module): norm layer to be used in blocks
"""
super(Res2Block, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm1d
width = int(planes * (base_width / 64.)) * groups
self.conv1 = conv(inplanes, width * scale, 1)
self.bn1 = norm_layer(width * scale)
# If scale == 1, single conv else identity & (scale - 1) convs
nb_branches = max(scale, 2) - 1
if first_block:
self.pool = nn.AvgPool1d(kernel_size=3, stride=stride, padding=1)
self.convs = nn.ModuleList([conv(width, width, 3, stride)
for _ in range(nb_branches)])
self.bns = nn.ModuleList([norm_layer(width) for _ in range(nb_branches)])
self.first_block = first_block
self.scale = scale
self.conv3 = conv(width * scale, planes * self.expansion, 1)
self.relu = Mish() #nn.ReLU(inplace=False)
self.bn3 = norm_layer(planes * self.expansion) #bn reverse
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.bn1(out) #bn reverse
# Chunk the feature map
xs = torch.chunk(out, self.scale, dim=1)
        # Running input for the cascaded convs (starts as 0 before the first chunk)
        y = 0
for idx, conv in enumerate(self.convs):
# Add previous y-value
if self.first_block:
y = xs[idx]
else:
y += xs[idx]
y = conv(y)
y = self.relu(self.bns[idx](y))
# Concatenate with previously computed values
out = torch.cat((out, y), 1) if idx > 0 else y
# Use last chunk as x1
if self.scale > 1:
if self.first_block:
out = torch.cat((out, self.pool(xs[len(self.convs)])), 1)
else:
out = torch.cat((out, xs[len(self.convs)]), 1)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
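# --- Hedged usage sketch (not part of the original file) ---
# Builds a single first block with a matching 1x1 downsample, loosely mirroring
# what Res2Net._make_layer below does; channel sizes and input length are
# illustrative assumptions.
def _example_res2block():
    inplanes, planes, scale = 64, 64, 4
    downsample = nn.Sequential(
        conv(inplanes, planes * Res2Block.expansion, 1, 1),
        nn.BatchNorm1d(planes * Res2Block.expansion),
    )
    block = Res2Block(inplanes, planes, stride=1, downsample=downsample,
                      scale=scale, first_block=True)
    x = torch.randn(2, inplanes, 128)  # (batch, channels, sequence length)
    return block(x).shape              # expected: (2, planes * 4, 128)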
def conv_layer(ni, nf, ks=3, stride=1, zero_bn=False, act=True):
bn = nn.BatchNorm1d(nf)
nn.init.constant_(bn.weight, 0. if zero_bn else 1.)
if act:
layers = [conv(ni, nf, ks, stride=stride), act_fn, bn]
else:
layers = [conv(ni, nf, ks, stride=stride), bn]
#if act: layers.append(act_fn)
return nn.Sequential(*layers)
class Res2Net(nn.Module):
"""Implements a Res2Net model as described in https://arxiv.org/pdf/1904.01169.pdf
Args:
block (torch.nn.Module): class constructor to be used for residual blocks
layers (list<int>): layout of layers
num_classes (int): number of output classes
zero_init_residual (bool): whether the residual connections should be initialized at zero
groups (int): number of convolution groups
width_per_group (int): number of channels per group
scale (int): scaling ratio within blocks
replace_stride_with_dilation (list<bool>): whether stride should be traded for dilation
norm_layer (torch.nn.Module): norm layer to be used
"""
def __init__(self, block, layers, c_in=3,num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=26, scale=4, replace_stride_with_dilation=None,
norm_layer=None):
super(Res2Net, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm1d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.scale = scale
#self.conv1 = nn.Conv1d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
# bias=False)
#modify stem
#stem = []
sizes = [c_in,32,64,64] #modified per Grankin
#for i in range(3):
# stem.append(conv_layer(sizes[i], sizes[i+1], stride=2 if i==0 else 1))
#stem (initial entry layers)
self.conv1 = conv_layer(c_in, sizes[1], stride=2)
self.conv2 = conv_layer(sizes[1],sizes[2])
self.conv3 = conv_layer(sizes[2],sizes[3])
self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
#nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
                if isinstance(m, Res2Block):
                    nn.init.constant_(m.bn3.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(self.inplanes, planes * block.expansion, 1, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, self.scale, first_block=True, norm_layer=norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
scale=self.scale, first_block=False, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
#stem layers
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.maxpool(x)
#res2 block layers
x = self.layer1(x)
# print('1: ', x.shape)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
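# --- Hedged usage sketch (not part of the original file) ---
# End-to-end forward pass through the 1D Res2Net defined above; channel count,
# sequence length and class count are illustrative. create_res2net is defined
# just below and is resolved when this helper is called.
def _example_res2net_forward():
    model = create_res2net(ni=3, nout=10)
    dummy = torch.randn(2, 3, 224)   # (batch, channels, sequence length)
    logits = model(dummy)            # expected shape: (2, 10)
    return logits.shape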
def create_res2net(ni, nout, layers=[3, 4, 6, 3], scale=4, width=26):
return Res2Net(Res2Block, layers, c_in=ni, num_classes=nout, scale=scale, width_per_group=width) |
py | b406bd75077956e353321adb2f72dcb920a9fbe8 | import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super().__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super().__init__()
self.embed_size = embed_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.num_layers = num_layers
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(
input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True,
)
self.fc = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
assert (
features.shape[0] == captions.shape[0]
), "Batch sizes are different for features and captions."
embeddings = self.word_embeddings(captions[:, :-1])
list_of_inputs = torch.cat((features, embeddings), dim=1)
list_of_outputs, _ = self.lstm(list_of_inputs, None)
return self.fc(list_of_outputs)
def sample(self, features, hidden=None, max_len=20):
"""
accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len)
"""
inputs = features
token_ids = []
for _ in range(max_len):
outputs, hidden = self.lstm(inputs, hidden)
outputs = self.fc(outputs)
_, token_id = outputs.max(2)
token_ids.append(token_id.item())
inputs = self.word_embeddings(token_id)
return token_ids
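# --- Hedged usage sketch (not part of the original file) ---
# Wires the encoder and decoder together for a single training-style forward
# pass. Sizes are illustrative; the call downloads ResNet-50 weights on first
# use. Passing features as (batch, 1, embed_size) is an assumption inferred
# from the concatenation along dim=1 in DecoderRNN.forward.
def _example_caption_forward():
    encoder = EncoderCNN(embed_size=256)
    decoder = DecoderRNN(embed_size=256, hidden_size=512, vocab_size=1000)
    images = torch.randn(4, 3, 224, 224)
    captions = torch.randint(0, 1000, (4, 12))
    features = encoder(images)                          # (4, 256)
    outputs = decoder(features.unsqueeze(1), captions)  # (4, 12, 1000)
    return outputs.shape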
|
py | b406be8c1206e4404546859a1290805cb0459e7f | import pytest
from lxml import etree
from ...services.xsd.constants import TOOL_XSD_FILE
from ...services.xsd.validation import GalaxyToolValidationService
from .sample_data import (
TEST_INVALID_TOOL_01_DOCUMENT,
TEST_MACRO_01_DOCUMENT,
TEST_SYNTAX_ERROR_MACRO_01_DOCUMENT,
TEST_SYNTAX_ERROR_TOOL_01_DOCUMENT,
TEST_TOOL_01_DOCUMENT,
)
from .utils import TestUtils
TEST_SERVER_NAME = "Test Server"
@pytest.fixture(scope="module")
def xsd_schema() -> etree.XMLSchema:
root = etree.parse(str(TOOL_XSD_FILE))
schema = etree.XMLSchema(root)
return schema
class TestGalaxyToolValidationServiceClass:
def test_validate_document_returns_empty_diagnostics_when_valid(self, xsd_schema: etree.XMLSchema) -> None:
service = GalaxyToolValidationService(TEST_SERVER_NAME, xsd_schema)
xml_document = TestUtils.from_document_to_xml_document(TEST_TOOL_01_DOCUMENT)
actual = service.validate_document(xml_document)
assert actual == []
def test_validate_macro_file_returns_empty_diagnostics_when_valid(self, xsd_schema: etree.XMLSchema) -> None:
service = GalaxyToolValidationService(TEST_SERVER_NAME, xsd_schema)
xml_document = TestUtils.from_document_to_xml_document(TEST_MACRO_01_DOCUMENT)
actual = service.validate_document(xml_document)
assert actual == []
def test_validate_document_returns_diagnostics_when_invalid(self, xsd_schema: etree.XMLSchema) -> None:
service = GalaxyToolValidationService(TEST_SERVER_NAME, xsd_schema)
xml_document = TestUtils.from_document_to_xml_document(TEST_INVALID_TOOL_01_DOCUMENT)
actual = service.validate_document(xml_document)
assert len(actual) > 0
def test_validate_document_returns_diagnostics_when_syntax_error(self, xsd_schema: etree.XMLSchema) -> None:
service = GalaxyToolValidationService(TEST_SERVER_NAME, xsd_schema)
xml_document = TestUtils.from_document_to_xml_document(TEST_SYNTAX_ERROR_TOOL_01_DOCUMENT)
actual = service.validate_document(xml_document)
assert len(actual) == 1
def test_validate_macro_file_returns_diagnostics_when_syntax_error(self, xsd_schema: etree.XMLSchema) -> None:
service = GalaxyToolValidationService(TEST_SERVER_NAME, xsd_schema)
xml_document = TestUtils.from_document_to_xml_document(TEST_SYNTAX_ERROR_MACRO_01_DOCUMENT)
actual = service.validate_document(xml_document)
assert len(actual) == 1
|
py | b406beb82c0d075e36dd7c3cd514fa973635b7ec | import os
import stat
import struct
import time
import zlib
from zipfile import ZipFile as BaseZipfile, ZipInfo, ZIP_STORED, ZIP64_LIMIT, \
ZIP_DEFLATED, LargeZipFile, crc32, \
_ZipDecrypter
randomFunc = os.urandom
class _ZipEncrypter(_ZipDecrypter):
def __call__(self, c):
"""Encrypt a single character."""
_c = ord(c)
k = self.key2 | 2
_c = _c ^ (((k * (k ^ 1)) >> 8) & 255)
_c = chr(_c)
self._UpdateKeys(c) # this is the only line that actually changed
return _c
class ZipFile(BaseZipfile):
def write(self, filename, arcname=None, compress_type=None, pwd=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
if isdir:
zinfo.compress_type = ZIP_STORED
elif compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
return
pwd = pwd or self.pwd
if pwd:
zinfo.flag_bits |= 0x8 | 0x1 # set stream and encrypted
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
if pwd:
ze = _ZipEncrypter(pwd)
encrypt = lambda x: "".join(map(ze, x))
zinfo._raw_time = (
zinfo.date_time[3] << 11
| zinfo.date_time[4] << 5
| (zinfo.date_time[5] // 2))
check_byte = (zinfo._raw_time >> 8) & 0xff
                encryption_header = randomFunc(11) + chr(check_byte)
                self.fp.write(encrypt(encryption_header))
else:
encrypt = lambda x: x
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(encrypt(buf))
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(encrypt(buf))
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
if pwd:
# Write CRC and file sizes after the file data
zinfo.compress_size += 12
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(
fmt, zinfo.CRC, zinfo.compress_size, zinfo.file_size))
self.fp.flush()
else:
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, bytes, compress_type=None, pwd=None):
"""Write a file into the archive. The contents is the string
'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.file_size = len(bytes) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
pwd = pwd or self.pwd
if pwd:
zinfo.flag_bits |= 0x01
zinfo.compress_size += 12 # 12 extra bytes for the header
if zinfo.flag_bits & 0x8:
zinfo._raw_time = (
zinfo.date_time[3] << 11
| zinfo.date_time[4] << 5
| (zinfo.date_time[5] // 2))
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
check_byte = (zinfo.CRC >> 24) & 0xff
            encryption_header = randomFunc(11) + chr(check_byte)
            ze = _ZipEncrypter(pwd)
            bytes = "".join(map(ze, encryption_header + bytes))
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(bytes)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
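# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended use of the pwd= argument added by this subclass; the file
# name and password are placeholders. Python 2 only, like the rest of the module.
def _example_encrypted_archive(path='example.zip', secret='s3cret'):
    zf = ZipFile(path, 'w', ZIP_DEFLATED)
    try:
        zf.writestr('hello.txt', 'hello world', pwd=secret)
    finally:
        zf.close()
    # The archive can be read back with the standard library interface, e.g.
    # ZipFile(path).read('hello.txt', pwd=secret)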
|
py | b406bf498fb04f3c28363041a790736f019922fa | #!/usr/bin/env python3
""" An attempt to solve the Conversation Log problem on Kattis """
import sys
import logging
logging.basicConfig(level=logging.INFO)
ignored = sys.stdin.readline()
all_unique_words = set()
words = dict()
wordcount = dict()
for line in sys.stdin:
data = line.rstrip().split(" ")
if data[0] not in words:
words[data[0]] = set()
for index in range(1, len(data)):
try:
wordcount[data[index]] += 1
except KeyError:
wordcount[data[index]] = 1
finally:
words[data[0]].add(data[index])
all_unique_words.add(data[index])
logging.info("words: {}\nwordcount: {}\nall_unique_words: {}".format(
words, wordcount, all_unique_words))
for person in words:
all_unique_words.intersection_update(words[person])
logging.info("all_unique_words:{}".format(all_unique_words))
if all_unique_words:
results = [[i, wordcount[i]] for i in all_unique_words]
sorted_results = sorted(results, key=lambda x: (-x[1], x[0]))
for result in sorted_results:
print(result[0])
else:
print("ALL CLEAR")
|
py | b406bf812b2157bac582f01ee22bb7e5ad95372e | #!/usr/bin/env python
import os
import sys
import glob
# Try and import pip. We'll stop if it is not present
try:
import pip
except ImportError:
print "Installation of SeqFindr requires pip. Please install it! See -"
print "http://pip.readthedocs.org/en/latest/installing.html"
sys.exit(1)
from setuptools import setup
__title__ = 'SeqFindr'
__version__ = '0.35.0'
__description__ = "A tool to easily create informative genomic feature plots"
__author__ = 'Mitchell Stanton-Cook'
__license__ = 'ECL 2.0'
__author_email__ = "[email protected]"
__url__ = 'http://github.com/mscook/SeqFindr'
# Helper functions
if sys.argv[-1] == 'publish':
print "Please use twine or do_release.sh"
sys.exit()
if sys.argv[-1] == 'clean':
os.system('rm -rf SeqFindr.egg-info build dist')
sys.exit()
if sys.argv[-1] == 'docs':
os.system('cd docs && make html')
sys.exit()
packages = [__title__, ]
requires = []
with open('requirements.txt') as fin:
lines = fin.readlines()
for line in lines:
requires.append(line.strip())
# Build lists to package the docs
html, sources, static = [], [], []
html_f = glob.glob('docs/_build/html/*')
accessory = glob.glob('docs/_build/html/*/*')
for f in html_f:
if os.path.isfile(f):
html.append(f)
for f in accessory:
if f.find("_static") != -1:
if os.path.isfile(f):
static.append(f)
elif f.find("_sources"):
if os.path.isfile(f):
sources.append(f)
setup(
name=__title__,
version=__version__,
description=__description__,
long_description=open('README.rst').read(),
author=__author__,
author_email=__author_email__,
url=__url__,
packages=packages,
test_suite="tests",
package_dir={__title__: __title__},
scripts=[__title__+'/'+__title__, __title__+'/vfdb_to_seqfindr'],
package_data={},
data_files=[('', ['LICENSE', 'requirements.txt', 'README.rst']),
('docs', html), ('docs/_static', static),
('docs/_sources', sources)],
include_package_data=True,
install_requires=requires,
license=__license__,
zip_safe=False,
classifiers=('Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',),
)
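# --- Hedged usage note (not part of the original setup script) ---
# Helper sub-commands handled above, plus the usual install path:
#   python setup.py clean    # remove build artifacts
#   python setup.py docs     # build the Sphinx HTML docs (cd docs && make html)
#   pip install .            # install SeqFindr with the pinned requirements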
|
py | b406c08bb20e2827afc53494cfb86cb60d6faddf | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Each set comes as two lines: the (unused) element count, then the values
c, a = int(input()), input().split()
d, b = int(input()), input().split()
x = set(a)
y = set(b)
# Symmetric difference: elements present in exactly one of the two sets
m = x.difference(y)
n = y.difference(x)
o = m.union(n)
print('\n'.join(sorted(o, key=int))) |
py | b406c0ac4821d42a06f4e1b14bcd547a4313e10f | from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from django.template.context import Context
from django.conf import settings
import json
import pkg_resources
from rest_framework.views import APIView
from accounts.models import User
def index(request):
return render(request, 'index.html', Context({
'dev_shared_key': settings.VAULTIER.get('dev_shared_key'),
}))
class ConfigView(APIView):
"""
View to provide JS configuration
"""
def get(self, request):
"""
Get configuration from settings, format it and return
"""
# get settings and transform it to json
conf_settings = json.dumps({
'VERSION': pkg_resources.get_distribution("Vaultier").version,
'raven_key': settings.VAULTIER.get('raven_key'),
'invitation_lifetime': settings.VAULTIER.get(
'invitation_lifetime'),
'registration_allow': settings.VAULTIER.get('registration_allow'),
'registration_enforce': not bool(User.objects.all().count()),
# dev
'dev_shared_key': settings.VAULTIER.get('dev_shared_key'),
'dev_show_token': settings.VAULTIER.get('dev_show_token'),
'dev_email': settings.VAULTIER.get('dev_email')
})
# add settings to script
script = 'InitializeConfig = function(app) { ' \
'app.Config = Ember.Object.extend(%s); }' % conf_settings
return HttpResponse(script, content_type='text/javascript')
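# --- Hedged illustration (not part of the original view) ---
# The response body built above looks roughly like this; the values shown are
# placeholders, not real project settings:
#   InitializeConfig = function(app) { app.Config = Ember.Object.extend(
#       {"VERSION": "x.y.z", "registration_allow": true, ...}); }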
def error404(request):
return redirect('/#'+request.path)
# def dev_mail(request):
# context = build_context(Member.objects.filter(
# status=MemberStatusField.STATUS_INVITED).reverse()[0])
# plain, html = render_email('mailer/invitation', context)
# return HttpResponse(html)
|