max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
wlscrape.py | bontchev/wlscrape | 110 | 12653902 | <gh_stars>100-1000
#!/usr/bin/env python
from __future__ import print_function
from lxml import html
import itertools
import argparse
import requests
import json
import wget
import sys
import os
__author__ = "<NAME> <<EMAIL>>"
__license__ = "GPL"
__VERSION__ = "1.07"
site = "https://wikileaks.org"
area = "/akp-emails/"
def error(e):
print("Error: %s." % e, file=sys.stderr)
sys.exit(-1)
def processData(tree, ext, blacklist):
try:
links = tree.xpath("//*[@id='searchresult']/tbody/tr/td[2]/a/@href")
sizes = tree.xpath("//*[@id='searchresult']/tbody/tr/td[3]/text()")
md5s = tree.xpath("//*[@id='searchresult']/tbody/tr/td[4]/text()")
except Exception as e:
error(e)
data = []
for i in range(len(links)):
theUrl = site + area + links[i]
if (not theUrl in blacklist):
data.append({"md5" : md5s[i], "url" : theUrl, "ext" : ext, "size" : sizes[i]})
return data
def processExtension(ext, blacklist, options):
unique = options.unique
pageNums = options.pages
url = site + area + "?file=" + ext
if (options.spam):
url += "&spam=1"
if (options.duplicates):
url += "&dupl=1"
url += "&count=" + str(options.elements) + "&page=1&#searchresult"
print(url, file=sys.stderr)
pageNum = 1
theData = []
seen = set()
try:
page = requests.get(url)
tree = html.fromstring(page.content)
if ((len(pageNums) == 0) or (pageNum in pageNums)):
newData = processData(tree, ext, blacklist)
if (unique):
for element in newData:
if (not element["md5"] in seen):
seen.add(element["md5"])
theData.append(element)
else:
theData += newData
#nextButtonXPath = "//*[@id='right-pane']/div[5]/div/ul/li[last()]/a/@href"
nextButtonXPath = "//*[@id='right-pane']/div[3]/div/ul/li[last()]/a/@href"
next = tree.xpath(nextButtonXPath)
while len(next):
url = site + next[0]
print (url, file=sys.stderr)
pageNum += 1
page = requests.get(url)
tree = html.fromstring(page.content)
if ((len(pageNums) == 0) or (pageNum in pageNums)):
newData = processData(tree, ext, blacklist)
if (unique):
for element in newData:
if (not element["md5"] in seen):
seen.add(element["md5"])
theData.append(element)
else:
theData += newData
next = tree.xpath(nextButtonXPath)
except Exception as e:
error(e)
return theData
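# For example, makeOutputDir(3) below creates (if missing) and returns the
# zero-padded directory name "003".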
def makeOutputDir(pageNum):
outputDir = str(pageNum).zfill(3)
try:
if (not os.path.exists(outputDir)):
os.mkdir(outputDir)
except Exception as e:
error(e)
return outputDir
def printTheData(theData, options):
md5only = options.md5only
download = options.download
elementsPerPage = options.elements
print("Number of files found: %d." % len(theData), file=sys.stderr)
try:
jsonData = json.dumps(theData, indent=4)
if (not md5only and not download):
print(jsonData)
else:
if (download):
paginate = False
outputDir = ""
elementNum = 1
if (len(theData) > elementsPerPage):
paginate = True
pageNum = 1
outputDir = makeOutputDir(pageNum)
for element in theData:
md5 = element["md5"].upper()
if (md5only):
print(md5)
if (download):
fileName = md5 + "." + element["ext"]
if (paginate):
if (elementNum > elementsPerPage):
elementNum = 1
pageNum += 1
outputDir = makeOutputDir(pageNum)
fileName = os.path.join(outputDir, fileName)
outputFile = wget.download(element["url"], out=fileName)
elementNum += 1
except Exception as e:
error(e)
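# Parse a comma-separated page specification such as "1-3,7" into a list of
# page numbers, e.g. [1, 2, 3, 7]; ranges like "1-3" are inclusive.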
def getList(argument):
    ranges = [(x.split('-') + [x])[:2] for x in argument.split(',')]
    return list(set(itertools.chain.from_iterable(range(int(lo), int(hi) + 1) for lo, hi in ranges)))
def main(args):
theData = []
theBlacklist = []
if (args.blacklist != None):
try:
with open(args.blacklist, "r") as blacklistFile:
theBlacklist = [line.split()[0] for line in blacklistFile.read().split('\n') if line.strip()]
except Exception as e:
error(e)
if (args.elements < 10):
args.elements = 10
if (args.elements > 200):
args.elements = 200
for ext in args.ext:
theData += processExtension(ext, theBlacklist, args)
printTheData(theData, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Scrapes suspected malware from Wikileaks.")
    parser.add_argument("--version", action="version",
                        version="%(prog)s version " + __VERSION__)
parser.add_argument("-m", "--md5only", action="store_true", help="only list the hashes")
parser.add_argument("-d", "--download", action="store_true", help="download the files")
parser.add_argument("-s", "--spam", action="store_true", help="look in the spam folder too")
parser.add_argument("-p", "--duplicates", action="store_true", help="include duplicates")
parser.add_argument("-u", "--unique", action="store_true", help="fetch only one entry per unique MD5")
parser.add_argument("-e", "--elements", type=int, default=200, help="elements per page")
parser.add_argument("-b", "--blacklist", dest="blacklist", default=None, help="blacklist file")
parser.add_argument("-a", "--pages", type=getList, dest="pages", default=[], help="list of pages to process")
parser.add_argument("ext", nargs="+", help="file extension")
args = parser.parse_args()
main(args)
sys.exit(0)
|
server/jobs/assign_grading_queues.py | okpy/ok | 148 | 12653908 | from server import jobs, utils
from server.constants import STAFF_ROLES
from server.models import Assignment, GradingTask, User
@jobs.background_job
def assign_grading_queues(assignment_id, staff, kind):
logger = jobs.get_job_logger()
cid = jobs.get_current_job().course_id
assign = Assignment.query.filter_by(id=assignment_id, course_id=cid).one()
selected_users = []
for hash_id in staff:
user = User.get_by_id(utils.decode_id(hash_id))
if user and user.is_enrolled(cid, roles=STAFF_ROLES):
selected_users.append(user)
# Available backups
data = assign.course_submissions()
backups = set(b['backup']['id'] for b in data if b['backup'])
tasks = GradingTask.create_staff_tasks(backups, selected_users, assignment_id, cid, kind)
logger.info("{} created!".format(tasks))
|
FWCore/Integration/test/testSeriesOfProcessesPROD_cfg.py | ckamtsikis/cmssw | 852 | 12653922 |
# This configuration is designed to be run as the second
# in a series of cmsRun processes. The process it configures
# will read a file in streamer format and produce two root
# files.
# For later event selection tests these paths are run:
# path p1 1:25 pass
# path p2 pass 51:60
# Checks the path names returned by the TriggerNames
# service.
# Multiple products are put in the event for use
# in subsequent processes.
# Two output files are created: one contains some
# fake raw data, the other contains some fake
# HLTDebug data (actually just dummy products containing
# an int, just for test purposes)
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
# wantSummary = cms.untracked.bool(True),
Rethrow = FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow
)
process.source = cms.Source("NewEventStreamFileReader",
fileNames = cms.untracked.vstring('file:testSeriesOfProcessesHLT.dat')
)
process.f1 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(25),
onlyOne = cms.untracked.bool(False)
)
process.f2a = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(50),
onlyOne = cms.untracked.bool(False)
)
process.f2b = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(10),
onlyOne = cms.untracked.bool(False)
)
process.a = cms.EDAnalyzer(
"TestTriggerNames",
trigPathsPrevious = cms.untracked.vstring(
'p01',
'p02',
'p03',
'p04'
),
streamerSource = cms.untracked.bool(True),
trigPaths = cms.untracked.vstring('p1', 'p2'),
dumpPSetRegistry = cms.untracked.bool(False)
)
# This puts products in the lumi's and run's. One failure
# mode of the maxLuminosityBlock parameter is tested by their
# mere existence.
process.makeRunLumiProducts = cms.EDProducer("ThingWithMergeProducer")
# In the next process we want to test input from a secondary input
# file so we split the products over 2 output files.
process.out1 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testSeriesOfProcessesPROD1.root'),
outputCommands = cms.untracked.vstring(
"drop *",
"keep *_fakeRaw_*_*"
)
)
process.out2 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testSeriesOfProcessesPROD2.root'),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_fakeRaw_*_*"
)
)
process.pathanalysis = cms.EDAnalyzer("PathAnalyzer")
process.p1 = cms.Path(process.f1 * process.makeRunLumiProducts)
process.p2 = cms.Path(~process.f2a * process.f2b)
process.e = cms.EndPath(process.a * process.pathanalysis * process.out1 * process.out2)
|
python/pycylon/examples/dataframe/concat.py | deHasara/cylon | 229 | 12653930 | <reponame>deHasara/cylon<filename>python/pycylon/examples/dataframe/concat.py
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import random
import pycylon as cn
from pycylon import DataFrame, CylonEnv
from pycylon.net import MPIConfig
df1 = DataFrame([random.sample(range(10, 100), 5),
random.sample(range(10, 100), 5)])
df2 = DataFrame([random.sample(range(10, 100), 5),
random.sample(range(10, 100), 5)])
df3 = DataFrame([random.sample(range(10, 100), 10),
random.sample(range(10, 100), 10)])
# local concat
df4 = cn.concat(axis=0, objs=[df1, df2, df3])
print("Local concat axis0")
print(df4)
df2.rename(['00', '11'])
df3.rename(['000', '111'])
df4 = cn.concat(axis=1, objs=[df1, df2, df3])
print("Local concat axis1")
print(df4)
# distributed concat
env = CylonEnv(config=MPIConfig())
df1 = DataFrame([random.sample(range(10 * env.rank, 15 * (env.rank + 1)), 5),
random.sample(range(10 * env.rank, 15 * (env.rank + 1)), 5)])
df2 = DataFrame([random.sample(range(10 * env.rank, 15 * (env.rank + 1)), 5),
random.sample(range(10 * env.rank, 15 * (env.rank + 1)), 5)])
df3 = DataFrame([random.sample(range(10 * env.rank, 15 * (env.rank + 1)), 10),
random.sample(range(10 * env.rank, 15 * (env.rank + 1)), 10)])
print("Distributed concat axis0", env.rank)
df4 = cn.concat(axis=0, objs=[df1, df2, df3], env=env)
print(df4)
df2.rename(['00', '11'])
df3.rename(['000', '111'])
df4 = cn.concat(axis=1, objs=[df1, df2, df3], env=env)
print("Distributed concat axis1", env.rank)
print(df4)
env.finalize()
|
core/alert.py | murtazakan/Nettacker | 884 | 12653932 | <filename>core/alert.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from core import color
from core.messages import load_message
from core.time import now
message_cache = load_message().messages
def run_from_api():
"""
check if framework run from API to prevent any alert
Returns:
True if run from API otherwise False
"""
return "--start-api" in sys.argv
def verbose_mode_is_enabled():
return '--verbose' in sys.argv or '-v' in sys.argv
def event_verbose_mode_is_enabled():
return '--verbose-event' in sys.argv
def messages(msg_id):
"""
load a message from message library with specified language
Args:
msg_id: message id
Returns:
the message content in the selected language if
message found otherwise return message in English
"""
return message_cache[str(msg_id)]
def info(content):
"""
    build the info message and print it to stdout unless the
    framework was run from the API
Args:
content: content of the message
Returns:
None
"""
if not run_from_api():
sys.stdout.buffer.write(
bytes(
color.color("yellow")
+ "[{0}][+] ".format(now())
+ color.color("green")
+ content
+ color.color("reset")
+ "\n",
"utf8",
)
)
sys.stdout.flush()
def verbose_event_info(content):
"""
    build the verbose event message and print it when verbose or
    event-verbose mode is enabled and the framework was not run from the API
Args:
content: content of the message
Returns:
None
"""
if (not run_from_api()) and (
verbose_mode_is_enabled() or event_verbose_mode_is_enabled()
): # prevent to stdout if run from API
sys.stdout.buffer.write(
bytes(
color.color("yellow")
+ "[{0}][+] ".format(now())
+ color.color("green")
+ content
+ color.color("reset")
+ "\n",
"utf8",
)
)
sys.stdout.flush()
def success_event_info(content):
"""
    build the success event message and print it unless the
    framework was run from the API
Args:
content: content of the message
Returns:
None
"""
if not run_from_api():
sys.stdout.buffer.write(
bytes(
color.color("red")
+ "[{0}][+++] ".format(now())
+ color.color("cyan")
+ content
+ color.color("reset")
+ "\n",
"utf8",
)
)
sys.stdout.flush()
def verbose_info(content):
"""
    build the verbose info message and print it only when
    verbose mode is enabled
Args:
content: content of the message
Returns:
None
"""
if verbose_mode_is_enabled():
sys.stdout.buffer.write(
bytes(
color.color("yellow")
+ "[{0}][+] ".format(now())
+ color.color("purple")
+ content
+ color.color("reset")
+ "\n",
"utf8",
)
)
sys.stdout.flush()
def write(content):
"""
simple print a message
Args:
content: content of the message
Returns:
None
"""
if not run_from_api():
sys.stdout.buffer.write(
bytes(content, "utf8") if isinstance(content, str) else content
)
sys.stdout.flush()
def warn(content):
"""
build the warn message
Args:
content: content of the message
Returns:
        None
"""
if not run_from_api():
sys.stdout.buffer.write(
bytes(
color.color("blue")
+ "[{0}][!] ".format(now())
+ color.color("yellow")
+ content
+ color.color("reset")
+ "\n",
"utf8",
)
)
sys.stdout.flush()
def error(content):
"""
build the error message
Args:
content: content of the message
Returns:
        None
"""
data = (
color.color("red")
+ "[{0}][X] ".format(now())
+ color.color("yellow")
+ content
+ color.color("reset")
+ "\n"
)
sys.stdout.buffer.write(data.encode("utf8"))
sys.stdout.flush()
def write_to_api_console(content):
"""
simple print a message in API mode
Args:
content: content of the message
Returns:
None
"""
sys.stdout.buffer.write(bytes(content, "utf8"))
sys.stdout.flush()
|
harness/determined/common/declarative_argparse.py | RAbraham/determined | 1,729 | 12653945 | import functools
import itertools
from argparse import SUPPRESS, ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from typing import Any, Callable, List, NamedTuple, Optional, Tuple, cast
def make_prefixes(desc: str) -> List[str]:
parts = desc.split("|")
ret = [parts[0]]
for part in parts[1:]:
ret.append(ret[-1] + part)
return ret
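# For example: make_prefixes("c|heck|out") -> ["c", "check", "checkout"]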
def generate_aliases(spec: str) -> Tuple[str, List[str]]:
"""
Take the given string and split it by spaces. For each word, split it by
pipe characters and compute the result of joining each prefix of that
list. Return a big list containing all the results, except that the result
of joining the whole first word is pulled out.
"c|heck|out co"
=> ["c|heck|out", "co"]
=> [["c", "heck", "out"], ["co"]]
=> [["c", "check", "checkout"], ["co"]]
=> "checkout", ["c", "check", "co"]
"""
prefixes = [make_prefixes(s) for s in spec.split()]
main = prefixes[0].pop()
return main, list(itertools.chain.from_iterable(prefixes))
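# Hypothetical call matching the docstring's worked example:
#   generate_aliases("c|heck|out co") -> ("checkout", ["c", "check", "co"])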
# Classes used to represent the structure of an argument parser setup; these
# are turned into actual `argparse` objects by `add_args`.
class Cmd:
"""Describes a subcommand."""
def __init__(
self,
name: str,
func: Optional[Callable],
help_str: str,
subs: List[Any],
is_default: bool = False,
) -> None:
"""
`subs` is a list containing `Cmd`, `Arg`, and `Group` that describes
the arguments, subcommands, and mutually exclusive argument groups
for this command.
"""
self.name = name
self.help_str = help_str
self.func = func
if self.func:
# Force the help string onto the actual function for later. This
# can be used to print the help string
self.func.__name__ = help_str
self.subs = subs
self.is_default = is_default
class Arg:
"""
Describes an argument. Arguments to the constructor are passed to
`add_argument`.
"""
def __init__(self, *args: Any, completer: Optional[Callable] = None, **kwargs: Any) -> None:
self.args = args
self.kwargs = kwargs
self.completer = completer
class Group:
"""Describes a mutually exclusive group of options."""
def __init__(self, *options: Arg, **kwargs: Any) -> None:
self.options = options
self.kwargs = kwargs
class ArgGroup:
"""
Describes a named conceptual group of options. Arguments are passed to
`add_argument_group`.
"""
def __init__(
self,
title: Optional[str] = None,
description: Optional[str] = None,
child_args: Optional[List[Arg]] = None,
) -> None:
self.title = title
self.description = description
self.child_args = child_args or []
class BoolOptArg(NamedTuple):
"""Describes a boolean --foo / --no-foo flag pair."""
true_name: str
false_name: str
dest: str
default: bool = False
true_help: Optional[str] = None
false_help: Optional[str] = None
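# A minimal, hypothetical description tree (names are illustrative only) of
# the kind consumed by `add_args` below:
#   description = [
#       Arg("--debug", action="store_true", help="enable debug output"),
#       Cmd("list ls", do_list, "list resources", [
#           Arg("--all", action="store_true", help="include hidden items"),
#       ]),
#   ]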
def wrap_func(parser: ArgumentParser, func: Callable) -> Callable:
@functools.wraps(func)
def wrapper(args: Namespace) -> Any:
args.func = func
return func(parser.parse_args([], args))
return wrapper
def help_func(parser: ArgumentParser) -> Callable:
"""
Return a function that prints help for the given parser. Using this doesn't
    exit during the call to `parse_args` itself, which would be ideal, but
only when the function from the `parse_args` result is called. It looks
about the same as long as you do the second right after the first, at
least.
"""
def inner_func(args: Namespace) -> Any:
parser.print_help()
return inner_func
def add_args(parser: ArgumentParser, description: List[Any], depth: int = 0) -> None:
"""
Populate the given parser with arguments, as specified by the
description. The description is a list of Arg, Cmd, and Group objects.
"""
subparsers = None
help_parser = None
def description_sort_key(desc: Any) -> str:
if isinstance(desc, Cmd):
return desc.name
# `sorted` is stable, so we shouldn't change the relative
# positioning of non-Cmd arg descriptions.
return ""
# Sort descriptions alphabetically by name before passing them to
# argparse. This ensures that `help` output is sorted
# alphabetically.
description = sorted(description, key=description_sort_key)
for thing in description:
if isinstance(thing, Cmd):
if subparsers is None:
metavar = "sub" * depth + "command"
subparsers = parser.add_subparsers(metavar=metavar)
# If there are any subcommands at all, also add a `help`
# subcommand.
help_parser = subparsers.add_parser("help", help="show help for this command")
help_parser.set_defaults(func=help_func(parser))
main_name, aliases = generate_aliases(thing.name)
subparser_kwargs = {
"aliases": aliases,
"formatter_class": ArgumentDefaultsHelpFormatter,
}
if thing.help_str != SUPPRESS:
subparser_kwargs["help"] = thing.help_str
subparser = subparsers.add_parser(main_name, **subparser_kwargs)
subparser.set_defaults(func=thing.func)
subparser.set_defaults(**{("_" + "sub" * depth + "command"): thing.name})
# If this is the default subcommand, make calling the parent with
# no subcommand behave the same as calling this subcommand with no
# arguments.
if thing.is_default:
thing.func = cast(Callable, thing.func)
parser.set_defaults(func=wrap_func(subparser, thing.func))
add_args(subparser, thing.subs, depth + 1)
elif isinstance(thing, Arg):
arg = parser.add_argument(*thing.args, **thing.kwargs)
arg.completer = thing.completer # type: ignore
elif isinstance(thing, Group):
group = parser.add_mutually_exclusive_group(**thing.kwargs)
for option in thing.options:
group.add_argument(*option.args, **option.kwargs)
elif isinstance(thing, ArgGroup):
arg_group = parser.add_argument_group(thing.title, thing.description)
for child_arg in thing.child_args:
arg_group.add_argument(*child_arg.args, **child_arg.kwargs)
elif isinstance(thing, BoolOptArg):
parser.add_argument(
thing.true_name, dest=thing.dest, action="store_true", help=thing.true_help
)
parser.add_argument(
thing.false_name, dest=thing.dest, action="store_false", help=thing.false_help
)
parser.set_defaults(**{thing.dest: thing.default})
# If there are any subcommands but none claimed the default action, make
# the default print help.
if subparsers is not None and parser.get_default("func") is None:
parser.set_defaults(func=help_func(parser))
|
extraPackages/pyzmq-17.1.2/examples/security/grasslands.py | dolboBobo/python3_ios | 130 | 12653990 | #!/usr/bin/env python
'''
No protection at all.
All connections are accepted, there is no authentication, and no privacy.
This is how ZeroMQ always worked until we built security into the wire
protocol in early 2013. Internally, it uses a security mechanism called
"NULL".
Author: <NAME>
'''
import zmq
ctx = zmq.Context.instance()
server = ctx.socket(zmq.PUSH)
server.bind('tcp://*:9000')
client = ctx.socket(zmq.PULL)
client.connect('tcp://127.0.0.1:9000')
server.send(b"Hello")
msg = client.recv()
if msg == b"Hello":
print("Grasslands test OK")
|
src/ralph/security/models.py | DoNnMyTh/ralph | 1,668 | 12654018 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from datetime import datetime
from dj.choices import Choices
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ralph.assets.models.base import BaseObject
from ralph.lib.mixins.models import (
AdminAbsoluteUrlMixin,
TaggableMixin,
TimeStampMixin
)
from ralph.lib.permissions import PermByFieldMixin
def any_exceeded(vulnerabilties):
return any([v.is_deadline_exceeded for v in vulnerabilties])
class ScanStatus(Choices):
_ = Choices.Choice
ok = _("ok")
fail = _("fail")
error = _("error")
class Risk(Choices):
_ = Choices.Choice
low = _("low")
medium = _("medium")
high = _("high")
class Vulnerability(
AdminAbsoluteUrlMixin,
PermByFieldMixin,
TimeStampMixin,
TaggableMixin,
models.Model,
):
_allow_in_dashboard = True
name = models.CharField(
verbose_name=_("name"),
max_length=1024,
unique=False
)
display_name = models.CharField(
verbose_name=_("display name"),
max_length=1024
)
patch_deadline = models.DateTimeField(null=True, blank=True)
risk = models.PositiveIntegerField(choices=Risk(), null=True, blank=True)
external_vulnerability_id = models.IntegerField(
unique=True, # id means id
null=True,
blank=True,
help_text=_('Id of vulnerability from external system'),
)
    @property
    def is_deadline_exceeded(self):
        # patch_deadline is nullable; a missing deadline is never exceeded
        return self.patch_deadline is not None and self.patch_deadline < datetime.now()
def __str__(self):
deadline = (
self.patch_deadline.strftime('%Y-%m-%d') if
self.patch_deadline else '-'
)
return "{} ({})".format(self.name, deadline)
class SecurityScan(
AdminAbsoluteUrlMixin,
PermByFieldMixin,
TimeStampMixin,
TaggableMixin,
models.Model,
):
_allow_in_dashboard = True
last_scan_date = models.DateTimeField()
scan_status = models.PositiveIntegerField(choices=ScanStatus())
next_scan_date = models.DateTimeField()
details_url = models.URLField(max_length=255, blank=True)
rescan_url = models.URLField(blank=True, verbose_name=_('Rescan url'))
base_object = models.OneToOneField(
BaseObject,
on_delete=models.CASCADE,
)
vulnerabilities = models.ManyToManyField(Vulnerability, blank=True)
    # this is a quirk field, updated manually (for now in the API)
    # because it's hard to handle it automatically
    # (its value is computed from an M2M field, and M2M signals are
    # complicated)
is_patched = models.BooleanField(default=False)
@property
def is_ok(self):
return self.scan_status == ScanStatus.ok.id
def update_is_patched(self):
"""Updates `is_patched` field depending on vulnerabilities"""
self.is_patched = not any_exceeded(self.vulnerabilities.all())
def __str__(self):
return "{} {} ({})".format(
self.last_scan_date.strftime('%Y-%m-%d'),
ScanStatus.from_id(self.scan_status).desc,
self.base_object.content_type,
)
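# Illustrative usage (hypothetical object names), e.g. from application code:
#   scan = SecurityScan.objects.get(base_object=some_base_object)
#   scan.update_is_patched()
#   scan.save(update_fields=['is_patched'])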
|
leetcode.com/python/208_Implement_Trie_(Prefix_Tree).py | XSoyOscar/Algorithms | 713 | 12654064 | <gh_stars>100-1000
class TrieNode:
def __init__(self):
self.flag = False
self.children = {}
class Trie(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: None
"""
current = self.root
for character in word:
if character not in current.children:
current.children[character] = TrieNode()
current = current.children[character]
current.flag = True
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
result, node = self.childSearch(word)
if result:
return node.flag
return False
def startsWith(self, prefix):
"""
Returns if there is any word in the trie that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
result, node = self.childSearch(prefix)
return result
def childSearch(self, word):
current = self.root
for character in word:
if character in current.children:
current = current.children[character]
else:
return False, None
return True, current
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix) |
Exec/science/flame_wave/analysis/profile2.py | MargotF/Castro | 178 | 12654075 | <gh_stars>100-1000
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 100, 1000)
H = 50
delta = 2.5
T_base = 2
T_star = 1
T = T_star + 0.5*(T_base - T_star)*(1.0 + np.tanh((x - (H) - 1.5*delta)/(0.5*delta)))
plt.plot(x, T)
plt.plot([H, H], [1, T_base])
plt.plot([H+3*delta, H+3*delta], [1, T_base])
plt.xlim(H - 2*delta, H + 4*delta)
plt.savefig("profile2.png")
|
src/genie/libs/parser/ios/show_routing.py | balmasea/genieparser | 204 | 12654082 | '''show_route.py
IOS parsers for the following show commands:
* show ip route
* show ip route vrf <vrf>
* show ipv6 route
* show ipv6 route vrf <vrf>
* show ip route <Hostname or A.B.C.D>
* show ip route vrf <vrf> <Hostname or A.B.C.D>
* show ipv6 route <Hostname or 2001:DB8:64:79::C:D>
* show ipv6 route vrf <vrf> <Hostname or 2001:DB8:64:79::C:D>
* show ipv6 route updated
* show ipv6 route vrf <vrf> updated
* show ip route summary
* show ip route vrf <vrf> summary
'''
from genie.libs.parser.iosxe.show_routing import (
ShowIpv6RouteUpdated as ShowIpv6RouteUpdated_iosxe, ShowIpRouteSummary as
ShowIpRouteSummary_iosxe, ShowIpRouteDistributor as
ShowIpRouteDistributor_iosxe, ShowIpv6RouteDistributor as
ShowIpv6RouteDistributor_iosxe)
class ShowIpRouteDistributor(ShowIpRouteDistributor_iosxe):
"""distributor class for show ip route"""
pass
class ShowIpv6RouteDistributor(ShowIpv6RouteDistributor_iosxe):
"""distributor class for show ipv6 route"""
pass
class ShowIpv6RouteUpdated(ShowIpv6RouteUpdated_iosxe):
"""Parser for :
show ipv6 route updated
show ipv6 route vrf <vrf> updated"""
pass
class ShowIpRouteSummary(ShowIpRouteSummary_iosxe):
"""Parser for :
show ip route summary
show ip route vrf <vrf> summary"""
pass |
mpunet/augmentation/__init__.py | alexsosn/MultiPlanarUNet | 156 | 12654089 | from .augmenters import Elastic2D, Elastic3D
|
pyzoo/test/zoo/pipeline/inference/test_inference_model.py | limn2o4/analytics-zoo | 2,970 | 12654169 | <reponame>limn2o4/analytics-zoo
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
import numpy as np
from bigdl.dataset.base import maybe_download
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.pipeline.inference import InferenceModel
import tarfile
np.random.seed(1337) # for reproducibility
resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
property_path = os.path.join(os.path.split(__file__)[0],
"../../../../../zoo/target/classes/app.properties")
data_url = "https://s3-ap-southeast-1.amazonaws.com"
with open(property_path) as f:
for _ in range(2): # skip the first two lines
next(f)
for line in f:
if "data-store-url" in line:
line = line.strip()
data_url = line.split("=")[1].replace("\\", "")
class TestInferenceModel(ZooTestCase):
def test_load_bigdl(self):
model = InferenceModel(3)
model.load_bigdl(os.path.join(resource_path, "models/bigdl/bigdl_lenet.model"))
input_data = np.random.random([4, 28, 28, 1])
output_data = model.predict(input_data)
def test_load_caffe(self):
model = InferenceModel(10)
model.load_caffe(os.path.join(resource_path, "models/caffe/test_persist.prototxt"),
os.path.join(resource_path, "models/caffe/test_persist.caffemodel"))
input_data = np.random.random([4, 3, 8, 8])
output_data = model.predict(input_data)
def test_load_openvino(self):
local_path = self.create_temp_dir()
model = InferenceModel(1)
model_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.xml"
weight_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.bin"
model_path = maybe_download("resnet_v1_50.xml",
local_path, model_url)
weight_path = maybe_download("resnet_v1_50.bin",
local_path, weight_url)
model.load_openvino(model_path, weight_path)
input_data = np.random.random([4, 1, 224, 224, 3])
model.predict(input_data)
if __name__ == "__main__":
pytest.main([__file__])
|
qcodes/instrument_drivers/Minicircuits/USBHIDMixin.py | riju-pal/QCoDeS_riju | 223 | 12654171 | """
A mixin module for USB Human Interface Device instruments
"""
import os
import time
import struct
from typing import Optional, List, Any
try:
import pywinusb.hid as hid
except ImportError:
# We will raise a proper error when we attempt to instantiate a driver.
# Raising an exception here will cause CI to fail under Linux
hid = None
from qcodes.instrument.base import Instrument
class USBHIDMixin(Instrument):
"""
Args:
instance_id: The id of the instrument we want to connect to. If
there is only one instrument, then this argument is optional.
If more than one instrument happen to be connected, use
`enumerate_devices` method to query their IDs
timeout: Specify a timeout for this instrument in seconds
"""
# The following class attributes should be set by subclasses
vendor_id = 0x0000
product_id = 0x0000
@staticmethod
def _check_hid_import() -> None:
if os.name != 'nt':
raise ImportError("This driver only works on Windows.")
if hid is None:
raise ImportError(
"pywinusb is not installed. Please install it by typing "
"'pip install pywinusb' in a qcodes environment terminal"
)
def __init__(self, name: str, instance_id: Optional[str] = None,
timeout: float = 2,
**kwargs: Any):
self._check_hid_import()
devs = hid.HidDeviceFilter(
product_id=self.product_id,
vendor_id=self.vendor_id,
instance_id=instance_id
).get_devices()
if len(devs) == 0:
raise RuntimeError("No instruments found!")
elif len(devs) > 1:
raise RuntimeError("Multiple HID devices detected! Please supply "
"a instance id")
self._device = devs[0]
self._device.open()
self._data_buffer: Optional[bytes] = None
self._device.set_raw_data_handler(self._handler)
self._timeout = timeout
self._tries_per_second = 5
super().__init__(name, **kwargs)
def _handler(self, data: bytes) -> None:
self._data_buffer = data
def _get_data_buffer(self) -> Optional[bytes]:
data = self._data_buffer
self._data_buffer = None
return data
def _pack_string(self, cmd: str) -> bytes:
raise NotImplementedError("Please subclass")
def _unpack_string(self, response: bytes) -> str:
raise NotImplementedError("Please subclass")
def write_raw(self, cmd: str) -> None:
"""
Send a string command to the human interface device
The given command is processed by `_pack_string` method to return a
byte sequence that is going to be actually sent to the device.
Subclasses must implement `_pack_string` method.
Args:
cmd: a command to send in a form of a string
"""
data = self._pack_string(cmd)
result = self._device.send_output_report(data)
if not result:
raise RuntimeError(f"Communication with device failed for command "
f"{cmd}")
def ask_raw(self, cmd: str) -> str:
"""
Send a string command to the human interface device and wait for a reply
The given command is processed by `_pack_string` method to return a
byte sequence that is going to be actually sent to the device.
Subclasses must implement `_pack_string` method.
The byte sequence of the reply is processed by `_unpack_string`
method, and the resulting string is returned. Subclasses must
implement `_unpack_string` method.
Args:
cmd: a command to send in a form of a string
"""
self.write_raw(cmd)
number_of_tries = int(self._tries_per_second * self._timeout)
response = None
for _ in range(number_of_tries):
time.sleep(1 / self._tries_per_second)
response = self._get_data_buffer()
if response is not None:
break
if response is None:
raise TimeoutError(f"Timed out for command {cmd}")
return self._unpack_string(response)
def close(self) -> None:
self._device.close()
@classmethod
def enumerate_devices(cls) -> List[str]:
"""
        This method returns the 'instance_id's of all connected devices
with the given product and vendor IDs.
"""
cls._check_hid_import()
devs = hid.HidDeviceFilter(
            product_id=cls.product_id,
vendor_id=cls.vendor_id
).get_devices()
return [dev.instance_id for dev in devs]
class MiniCircuitsHIDMixin(USBHIDMixin):
"""
The specific implementation for mini circuit human interface devices.
This implementation allows to use `write`/`ask` methods of the instrument
instance to send SCPI commands to MiniCircuits instruments over USB HID
connection.
Args:
name: instrument name
        instance_id: The id of the instrument we want to connect to. If there
            is only one instrument then this is an optional argument. If we
            have more than one instrument, use the class method
`enumerate_devices` to query their IDs
timeout: Specify a timeout for this instrument in seconds
"""
def __init__(self, name: str, instance_id: Optional[str] = None,
timeout: float = 2,
**kwargs: Any):
# USB interrupt code for sending SCPI commands
self._sending_scpi_cmds_code = 1
self._usb_endpoint = 0
self._end_of_message = b"\x00"
self.packet_size = 64
super().__init__(name, instance_id, timeout, **kwargs)
def _pack_string(self, cmd: str) -> bytes:
"""
Pack a string to a binary format such that it can be sent to the HID.
Args:
cmd: a SCPI command to send
"""
str_len = len(cmd)
# "-1" is here because we need to compensate for the first byte in
# the packet which is always the usb interrupt code of the command
# (in this case the command tell the device that we are querying a
# SCPI command)
pad_len = self.packet_size - str_len - 1
if pad_len < 0:
raise ValueError(f"Length of data exceeds {self.packet_size} B")
packed_data = struct.pack(
f"BB{str_len}s{pad_len}x",
self._usb_endpoint,
self._sending_scpi_cmds_code,
cmd.encode("ascii")
)
return packed_data
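    # Illustrative (hypothetical command): _pack_string("*IDN?") packs byte 0
    # as the usb endpoint (0), byte 1 as the SCPI interrupt code (1), then the
    # ASCII command bytes, zero-padded so that 64 bytes follow the endpoint byte.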
    def _unpack_string(self, response: bytes) -> str:
"""
Unpack data received from the instrument into a string
Note that this method is not specific to SCPI-only responses.
Args:
response: a raw byte sequence response from the instrument
"""
_, _, reply_data = struct.unpack(
f"BB{self.packet_size - 1}s",
bytes(response)
)
span = reply_data.find(self._end_of_message)
return reply_data[:span].decode("ascii")
|
recipes/tcsbank-uconfig/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12654180 | <gh_stars>100-1000
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class TCSBankUconfigConan(ConanFile):
name = "tcsbank-uconfig"
description = "Lightweight, header-only, C++17 configuration library"
topics = ("conan", "configuration", "env", "json")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/TinkoffCreditSystems/uconfig"
license = "Apache-2.0"
generators = "cmake", "cmake_find_package_multi"
settings = "os", "arch", "compiler", "build_type"
options = {
"with_rapidjson": [True, False],
}
default_options = {
"with_rapidjson": True,
}
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def requirements(self):
if self.options.with_rapidjson:
self.requires("rapidjson/1.1.0")
def validate(self):
compiler = str(self.settings.compiler)
compiler_version = tools.Version(self.settings.compiler.version)
min_req_cppstd = "17"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, min_req_cppstd)
else:
self.output.warn("%s recipe lacks information about the %s compiler"
" standard version support." % (self.name, compiler))
minimal_version = {
"Visual Studio": "16",
"gcc": "7.3",
"clang": "6.0",
"apple-clang": "10.0",
}
        # Exclude unsupported compilers
if compiler not in minimal_version:
self.output.info("%s requires a compiler that supports at least C++%s" % (self.name, min_req_cppstd))
return
if compiler_version < minimal_version[compiler]:
raise ConanInvalidConfiguration(
"%s requires a compiler that supports at least C++%s. %s %s is not supported." %
(self.name, min_req_cppstd, compiler, compiler_version))
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
self.copy("*.ipp", dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_info(self):
self.cpp_info.names["pkg_config"] = "uconfig"
self.cpp_info.names["cmake_find_package"] = "uconfig"
self.cpp_info.names["cmake_find_package_multi"] = "uconfig"
if self.options.with_rapidjson:
self.cpp_info.defines = ["RAPIDJSON_HAS_STDSTRING=1"]
def package_id(self):
self.info.header_only()
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/src/operators/test_membership.py | webdevhub42/Lambda | 12,869 | 12654181 | """Membership operators
@see: https://www.w3schools.com/python/python_operators.asp
Membership operators are used to test whether a sequence is present in an object.
"""
def test_membership_operators():
"""Membership operators"""
# Let's use the following fruit list to illustrate membership concept.
fruit_list = ["apple", "banana"]
# in
# Returns True if a sequence with the specified value is present in the object.
# Returns True because a sequence with the value "banana" is in the list
assert "banana" in fruit_list
# not in
# Returns True if a sequence with the specified value is not present in the object
# Returns True because a sequence with the value "pineapple" is not in the list.
assert "pineapple" not in fruit_list
|
opps/bin/opps-admin.py | jeanmask/opps | 159 | 12654188 | <reponame>jeanmask/opps
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from django.conf import settings
from django.core import management
settings.configure()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Opps CMS bin file')
parser.add_argument('operation', help='task to be performed',
choices=['startproject'])
parser.add_argument("project_name", help="Project name", type=str)
args = parser.parse_args()
if args.operation == 'startproject':
management.call_command(
'startproject', args.project_name,
template='https://github.com/opps/opps-project-template/zipball/'
'master',
extensions=('py', 'md', 'dev')
)
|
setup.py | undertuga/papa-nicolau | 147 | 12654189 | <gh_stars>100-1000
#!/usr/bin/env python
from setuptools import setup, find_packages
import scrape
import os
def read(*names):
values = dict()
extensions = [".txt", ".rst"]
for name in names:
value = ""
for extension in extensions:
filename = name + extension
if os.path.isfile(filename):
value = open(name + extension).read()
break
values[name] = value
return values
with open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.rst"),
encoding="utf-8",
) as f:
long_description = f.read()
setup(
name="scrape",
version=scrape.__version__,
description="a command-line web scraping tool",
long_description=long_description,
long_description_content_type="text/x-rst",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Utilities",
"Topic :: Text Processing",
],
keywords="web crawler scraper scrape crawl download filter save webpages websites images docs document documentation pdf csv html lxml",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com/huntrar/scrape",
license="MIT",
packages=find_packages(),
entry_points={"console_scripts": ["scrape = scrape.scrape:command_line_runner"]},
install_requires=["lxml", "pdfkit", "requests", "six", "tldextract"],
)
|
docs/nb_gen_tests/conftest.py | progwriter/pybatfish | 160 | 12654199 | <reponame>progwriter/pybatfish<gh_stars>100-1000
# coding: utf-8
from os.path import abspath, dirname, realpath
from pathlib import Path
import pytest
import yaml
from pybatfish.client.session import Session
_THIS_DIR: Path = Path(abspath(dirname(realpath(__file__))))
_DOC_DIR: Path = _THIS_DIR.parent
_QUESTIONS_YAML: Path = _DOC_DIR / "nb_gen" / "questions.yaml"
@pytest.fixture(scope="session")
def session():
return Session()
@pytest.fixture(scope="session")
def categories():
return yaml.safe_load(_QUESTIONS_YAML.open())
|
empire/server/modules/python/situational_awareness/host/osx/situational_awareness.py | chenxiangfang/Empire | 2,541 | 12654205 | <reponame>chenxiangfang/Empire
from __future__ import print_function
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common.module_models import PydanticModule
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
script = ''
if params['Debug']:
debug = params['Debug']
script += "Debug = " + str(debug) + '\n'
if params['HistoryCount']:
search = params['HistoryCount']
script += 'HistoryCount = ' + str(search) + '\n'
script += """
try:
import subprocess
import sys
import os
import time
from os.path import expanduser
# Get Home User
home = str(expanduser("~"))
sudo = True
# Check for sudo privs, if true than set true
process = subprocess.Popen('which sudo|wc -l', stdout=subprocess.PIPE, shell=True)
result = process.communicate()
result = result[0].strip()
if str(result) != "1":
print("[!] ERROR some shit requires (sudo) privileges!")
sudo = False
sys.exit()
# Enum Hostname
try:
process = subprocess.Popen('hostname', stdout=subprocess.PIPE, shell=True)
hostname = process.communicate()
hostname = hostname[0].strip()
print("[*] Hostname:")
print((" - " + str(hostname.strip())))
except Exception as e:
if Debug:
print(("[!] Error enumerating hostname: " + str(e)))
pass
# Enum Software Package
try:
process = subprocess.Popen('sw_vers -productVersion', stdout=subprocess.PIPE, shell=True)
swvers = process.communicate()
swvers = swvers[0].strip()
print("[*] MAC OS Package Level:")
print((" - " + str(swvers.strip())))
except Exception as e:
if Debug:
print(("[!] Error enumerating OS Package: " + str(e)))
pass
# Enume system Hardware Overview
try:
process = subprocess.Popen("system_profiler SPHardwareDataType", stdout=subprocess.PIPE, shell=True)
ho = process.communicate()
ho = ho[0].split('\\n')
print("[*] Hardware Overview:")
for x in ho[4:]:
if x:
print((" - " + str(x.strip())))
except Exception as e:
if Debug:
print(("[!] Error enumerating Hardware Overview: " + str(e)))
# Enum Users
try:
process = subprocess.Popen("dscacheutil -q user | grep -A 3 -B 2 -e uid:\ 5'[0-9][0-9]'", stdout=subprocess.PIPE, shell=True)
users = process.communicate()
users = users[0].split('\\n')
print("[*] Client Users:")
for x in users:
if x:
print(" - " + str(x.strip()))
else:
print()
except Exception as e:
if Debug:
print("[!] Error enumerating OS Package: " + str(e))
pass
# Enum Last Logins
try:
print("[*] Last Logins:")
process = subprocess.Popen("last -10", stdout=subprocess.PIPE, shell=True)
last = process.communicate()
last = last[0].split('\\n')
for x in last:
if x.startswith('wtmp'):
break
if x:
print(" - " + str(x.strip()))
except Exception as e:
if Debug:
print("[!] Error Enumerating en0: " + str(e))
pass
# Enum Hardware
try:
process = subprocess.Popen("networksetup -listallhardwareports", stdout=subprocess.PIPE, shell=True)
hardware = process.communicate()
hardware = hardware[0].split('\\n')
print("[*] Installed Interfaces:")
for x in hardware:
if x:
print(" - " + str(x.strip()))
else:
print()
except Exception as e:
if Debug:
print("[!] Error Enumerating Installed Interfaces: " + str(e))
pass
# Enum en0
try:
process = subprocess.Popen("ipconfig getpacket en0", stdout=subprocess.PIPE, shell=True)
inf = process.communicate()
inf = inf[0].split('\\n')
print("[*] en0 Interface:")
for x in inf:
if x:
print(" - " + str(x.strip()))
else:
print()
except Exception as e:
if Debug:
print("[!] Error Enumerating en0: " + str(e))
pass
# Enum Hosts DNS file
try:
process = subprocess.Popen("cat /private/etc/hosts", stdout=subprocess.PIPE, shell=True)
hosts = process.communicate()
hosts = hosts[0].split('\\n')
print("[*] DNS Hosts File:")
for x in hosts:
if x:
if x.startswith("#"):
pass
else:
print(" - " + str(x.strip()))
else:
print()
except Exception as e:
if Debug:
print("[!] Error Enumerating Hosts File: " + str(e))
pass
# Enum bash history
try:
location = home + "/.bash_history"
with open(location, 'r') as myfile:
HistoryResult = myfile.readlines()
HistoryCount = HistoryCount * -1
print("[*] Enumerating User Bash History")
print(" - History count size: " + str(len(HistoryResult)))
for item in HistoryResult[HistoryCount:]:
print(" * " + str(item.strip()))
print("[*] SSH commands in History: ")
for item in HistoryResult:
if "ssh" in item.lower():
print(" * " + str(item.strip()))
except Exception as e:
if Debug:
print("[!] Error enumerating user bash_history: " + str(e))
pass
# Enum Wireless Connectivity Info
try:
process = subprocess.Popen(executable="/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport", args="-I", stdout=subprocess.PIPE, shell=True)
wireless = process.communicate()
if wireless[0] != '':
wireless = wireless[0].split('\\n')
print("[*] Wireless Connectivity Info:")
for x in wireless:
if x:
print(" - " + str(x.strip()))
else:
print()
except Exception as e:
if Debug:
print("[!] Error enumerating user Wireless Connectivity Info: " + str(e))
pass
# Enum AV / Protection Software
except Exception as e:
print(e)"""
# add any arguments to the end exec
return script
|
boto3_type_annotations_with_docs/boto3_type_annotations/ec2/waiter.py | cowboygneox/boto3_type_annotations | 119 | 12654222 | from typing import Dict
from typing import List
from botocore.waiter import Waiter
class BundleTaskComplete(Waiter):
def wait(self, BundleIds: List = None, Filters: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_bundle_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasks>`_
**Request Syntax**
::
waiter.wait(
BundleIds=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type BundleIds: list
:param BundleIds:
The bundle task IDs.
Default: Describes all your bundle tasks.
- *(string) --*
:type Filters: list
:param Filters:
The filters.
* ``bundle-id`` - The ID of the bundle task.
* ``error-code`` - If the task failed, the error code returned.
* ``error-message`` - If the task failed, the error message returned.
* ``instance-id`` - The ID of the instance.
* ``progress`` - The level of task completion, as a percentage (for example, 20%).
* ``s3-bucket`` - The Amazon S3 bucket to store the AMI.
* ``s3-prefix`` - The beginning of the AMI name.
* ``start-time`` - The time the task started (for example, 2013-09-15T17:15:20.000Z).
* ``state`` - The state of the task (``pending`` | ``waiting-for-shutdown`` | ``bundling`` | ``storing`` | ``cancelling`` | ``complete`` | ``failed`` ).
* ``update-time`` - The time of the most recent update for the task.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
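# Illustrative usage (assumes a configured boto3 EC2 client named `ec2`;
# the bundle ID is hypothetical):
#   ec2.get_waiter('bundle_task_complete').wait(BundleIds=['bun-12345678'])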
class ConversionTaskCancelled(Waiter):
def wait(self, ConversionTaskIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_conversion_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks>`_
**Request Syntax**
::
waiter.wait(
ConversionTaskIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ConversionTaskIds: list
:param ConversionTaskIds:
The conversion task IDs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class ConversionTaskCompleted(Waiter):
def wait(self, ConversionTaskIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_conversion_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks>`_
**Request Syntax**
::
waiter.wait(
ConversionTaskIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ConversionTaskIds: list
:param ConversionTaskIds:
The conversion task IDs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class ConversionTaskDeleted(Waiter):
def wait(self, ConversionTaskIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_conversion_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks>`_
**Request Syntax**
::
waiter.wait(
ConversionTaskIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ConversionTaskIds: list
:param ConversionTaskIds:
The conversion task IDs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class CustomerGatewayAvailable(Waiter):
def wait(self, CustomerGatewayIds: List = None, Filters: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_customer_gateways` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGateways>`_
**Request Syntax**
::
waiter.wait(
CustomerGatewayIds=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type CustomerGatewayIds: list
:param CustomerGatewayIds:
One or more customer gateway IDs.
Default: Describes all your customer gateways.
- *(string) --*
:type Filters: list
:param Filters:
One or more filters.
* ``bgp-asn`` - The customer gateway\'s Border Gateway Protocol (BGP) Autonomous System Number (ASN).
* ``customer-gateway-id`` - The ID of the customer gateway.
* ``ip-address`` - The IP address of the customer gateway\'s Internet-routable external interface.
* ``state`` - The state of the customer gateway (``pending`` | ``available`` | ``deleting`` | ``deleted`` ).
* ``type`` - The type of customer gateway. Currently, the only supported type is ``ipsec.1`` .
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
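# Hedged usage sketch (editorial addition): waiting for a customer gateway by
# filter instead of by ID; the BGP ASN value is a made-up placeholder.
def _example_wait_customer_gateway_available():
    import boto3
    ec2 = boto3.client('ec2')
    waiter = ec2.get_waiter('customer_gateway_available')
    waiter.wait(Filters=[{'Name': 'bgp-asn', 'Values': ['65000']}])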
class ExportTaskCancelled(Waiter):
def wait(self, ExportTaskIds: List = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_export_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks>`_
**Request Syntax**
::
waiter.wait(
ExportTaskIds=[
'string',
],
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ExportTaskIds: list
:param ExportTaskIds:
The export task IDs.
- *(string) --*
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
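# Hedged sketch (editorial addition): cancel an export task and then confirm
# the cancellation took effect; the task ID is a hypothetical placeholder.
def _example_wait_export_task_cancelled():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.cancel_export_task(ExportTaskId='export-i-0123456789abcdef0')
    waiter = ec2.get_waiter('export_task_cancelled')
    waiter.wait(ExportTaskIds=['export-i-0123456789abcdef0'])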
class ExportTaskCompleted(Waiter):
def wait(self, ExportTaskIds: List = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_export_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks>`_
**Request Syntax**
::
waiter.wait(
ExportTaskIds=[
'string',
],
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ExportTaskIds: list
:param ExportTaskIds:
The export task IDs.
- *(string) --*
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
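# Hedged sketch (editorial addition): export tasks can run long, so
# MaxAttempts is raised here; 15 s x 120 attempts allows roughly 30 minutes
# of polling. The task ID is a hypothetical placeholder.
def _example_wait_export_task_completed():
    import boto3
    ec2 = boto3.client('ec2')
    waiter = ec2.get_waiter('export_task_completed')
    waiter.wait(
        ExportTaskIds=['export-i-0123456789abcdef0'],
        WaiterConfig={'Delay': 15, 'MaxAttempts': 120},
    )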
class ImageAvailable(Waiter):
def wait(self, ExecutableUsers: List = None, Filters: List = None, ImageIds: List = None, Owners: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_images` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages>`_
**Request Syntax**
::
waiter.wait(
ExecutableUsers=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
ImageIds=[
'string',
],
Owners=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ExecutableUsers: list
:param ExecutableUsers:
Scopes the images by users with explicit launch permissions. Specify an AWS account ID, ``self`` (the sender of the request), or ``all`` (public AMIs).
- *(string) --*
:type Filters: list
:param Filters:
The filters.
* ``architecture`` - The image architecture (``i386`` | ``x86_64`` ).
* ``block-device-mapping.delete-on-termination`` - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.snapshot-id`` - The ID of the snapshot used for the EBS volume.
* ``block-device-mapping.volume-size`` - The volume size of the EBS volume, in GiB.
* ``block-device-mapping.volume-type`` - The volume type of the EBS volume (``gp2`` | ``io1`` | ``st1`` | ``sc1`` | ``standard`` ).
* ``block-device-mapping.encrypted`` - A Boolean that indicates whether the EBS volume is encrypted.
* ``description`` - The description of the image (provided during image creation).
* ``ena-support`` - A Boolean that indicates whether enhanced networking with ENA is enabled.
* ``hypervisor`` - The hypervisor type (``ovm`` | ``xen`` ).
* ``image-id`` - The ID of the image.
* ``image-type`` - The image type (``machine`` | ``kernel`` | ``ramdisk`` ).
* ``is-public`` - A Boolean that indicates whether the image is public.
* ``kernel-id`` - The kernel ID.
* ``manifest-location`` - The location of the image manifest.
* ``name`` - The name of the AMI (provided during image creation).
* ``owner-alias`` - String value from an Amazon-maintained list (``amazon`` | ``aws-marketplace`` | ``microsoft`` ) of image owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.
* ``owner-id`` - The AWS account ID of the image owner.
* ``platform`` - The platform. To only list Windows-based AMIs, use ``windows`` .
* ``product-code`` - The product code.
* ``product-code.type`` - The type of the product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``state`` - The state of the image (``available`` | ``pending`` | ``failed`` ).
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - The message for the state change.
* ``sriov-net-support`` - A value of ``simple`` indicates that enhanced networking with the Intel 82599 VF interface is enabled.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``virtualization-type`` - The virtualization type (``paravirtual`` | ``hvm`` ).
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type ImageIds: list
:param ImageIds:
The image IDs.
Default: Describes all images available to you.
- *(string) --*
:type Owners: list
:param Owners:
Filters the images by the owner. Specify an AWS account ID, ``self`` (owner is the sender of the request), or an AWS owner alias (valid values are ``amazon`` | ``aws-marketplace`` | ``microsoft`` ). Omitting this option returns all images for which you have launch permissions, regardless of ownership.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
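# Hedged sketch (editorial addition): a common pattern is to create an AMI
# and then block until it is usable; the instance ID and image name below
# are placeholders.
def _example_wait_image_available():
    import boto3
    ec2 = boto3.client('ec2')
    resp = ec2.create_image(InstanceId='i-0123456789abcdef0', Name='my-ami')
    ec2.get_waiter('image_available').wait(ImageIds=[resp['ImageId']])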
class ImageExists(Waiter):
def wait(self, ExecutableUsers: List = None, Filters: List = None, ImageIds: List = None, Owners: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_images` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages>`_
**Request Syntax**
::
waiter.wait(
ExecutableUsers=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
ImageIds=[
'string',
],
Owners=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ExecutableUsers: list
:param ExecutableUsers:
Scopes the images by users with explicit launch permissions. Specify an AWS account ID, ``self`` (the sender of the request), or ``all`` (public AMIs).
- *(string) --*
:type Filters: list
:param Filters:
The filters.
* ``architecture`` - The image architecture (``i386`` | ``x86_64`` ).
* ``block-device-mapping.delete-on-termination`` - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.snapshot-id`` - The ID of the snapshot used for the EBS volume.
* ``block-device-mapping.volume-size`` - The volume size of the EBS volume, in GiB.
* ``block-device-mapping.volume-type`` - The volume type of the EBS volume (``gp2`` | ``io1`` | ``st1`` | ``sc1`` | ``standard`` ).
* ``block-device-mapping.encrypted`` - A Boolean that indicates whether the EBS volume is encrypted.
* ``description`` - The description of the image (provided during image creation).
* ``ena-support`` - A Boolean that indicates whether enhanced networking with ENA is enabled.
* ``hypervisor`` - The hypervisor type (``ovm`` | ``xen`` ).
* ``image-id`` - The ID of the image.
* ``image-type`` - The image type (``machine`` | ``kernel`` | ``ramdisk`` ).
* ``is-public`` - A Boolean that indicates whether the image is public.
* ``kernel-id`` - The kernel ID.
* ``manifest-location`` - The location of the image manifest.
* ``name`` - The name of the AMI (provided during image creation).
* ``owner-alias`` - String value from an Amazon-maintained list (``amazon`` | ``aws-marketplace`` | ``microsoft`` ) of image owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.
* ``owner-id`` - The AWS account ID of the image owner.
* ``platform`` - The platform. To only list Windows-based AMIs, use ``windows`` .
* ``product-code`` - The product code.
* ``product-code.type`` - The type of the product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``state`` - The state of the image (``available`` | ``pending`` | ``failed`` ).
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - The message for the state change.
* ``sriov-net-support`` - A value of ``simple`` indicates that enhanced networking with the Intel 82599 VF interface is enabled.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``virtualization-type`` - The virtualization type (``paravirtual`` | ``hvm`` ).
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type ImageIds: list
:param ImageIds:
The image IDs.
Default: Describes all images available to you.
- *(string) --*
:type Owners: list
:param Owners:
Filters the images by the owner. Specify an AWS account ID, ``self`` (owner is the sender of the request), or an AWS owner alias (valid values are ``amazon`` | ``aws-marketplace`` | ``microsoft`` ). Omitting this option returns all images for which you have launch permissions, regardless of ownership.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
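# Hedged sketch (editorial addition): ImageExists is useful right after an
# AMI ID is handed to you, before describe_images would otherwise report the
# image as not found; the filter narrows the match to EBS-backed images.
# The AMI ID is a hypothetical placeholder.
def _example_wait_image_exists():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('image_exists').wait(
        ImageIds=['ami-0123456789abcdef0'],
        Filters=[{'Name': 'root-device-type', 'Values': ['ebs']}],
    )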
class InstanceExists(Waiter):
def wait(self, Filters: List = None, InstanceIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_instances` every 5 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ).
* ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ).
* ``availability-zone`` - The Availability Zone of the instance.
* ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` .
* ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``block-device-mapping.volume-id`` - The volume ID of the EBS volume.
* ``client-token`` - The idempotency token you provided when you launched the instance.
* ``dns-name`` - The public DNS name of the instance.
* ``group-id`` - The ID of the security group for the instance. EC2-Classic only.
* ``group-name`` - The name of the security group for the instance. EC2-Classic only.
* ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation.
* ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable.
* ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ).
* ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN.
* ``image-id`` - The ID of the image used to launch the instance.
* ``instance-id`` - The ID of the instance.
* ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ).
* ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-type`` - The type of instance (for example, ``t2.micro`` ).
* ``instance.group-id`` - The ID of the security group for the instance.
* ``instance.group-name`` - The name of the security group for the instance.
* ``ip-address`` - The public IPv4 address of the instance.
* ``kernel-id`` - The kernel ID.
* ``key-name`` - The name of the key pair used when the instance was launched.
* ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
* ``launch-time`` - The time when the instance was launched.
* ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ).
* ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface.
* ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
* ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface.
* ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface.
* ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
* ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
* ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
* ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
* ``network-interface.attachment.attachment-id`` - The ID of the interface attachment.
* ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached.
* ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
* ``network-interface.attachment.device-index`` - The device index to which the network interface is attached.
* ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance.
* ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated.
* ``network-interface.availability-zone`` - The Availability Zone for the network interface.
* ``network-interface.description`` - The description of the network interface.
* ``network-interface.group-id`` - The ID of a security group associated with the network interface.
* ``network-interface.group-name`` - The name of a security group associated with the network interface.
* ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface.
* ``network-interface.mac-address`` - The MAC address of the network interface.
* ``network-interface.network-interface-id`` - The ID of the network interface.
* ``network-interface.owner-id`` - The ID of the owner of the network interface.
* ``network-interface.private-dns-name`` - The private DNS name of the network interface.
* ``network-interface.requester-id`` - The requester ID for the network interface.
* ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS.
* ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ).
* ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
* ``network-interface.subnet-id`` - The ID of the subnet for the network interface.
* ``network-interface.vpc-id`` - The ID of the VPC for the network interface.
* ``owner-id`` - The AWS account ID of the instance owner.
* ``placement-group-name`` - The name of the placement group for the instance.
* ``placement-partition-number`` - The partition in which the instance is located.
* ``platform`` - The platform. To list only Windows instances, use ``windows`` .
* ``private-dns-name`` - The private IPv4 DNS name of the instance.
* ``private-ip-address`` - The private IPv4 address of the instance.
* ``product-code`` - The product code associated with the AMI used to launch the instance.
* ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``reason`` - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.
* ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
* ``reservation-id`` - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC.
* ``spot-instance-request-id`` - The ID of the Spot Instance request.
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - A message that describes the state change.
* ``subnet-id`` - The ID of the subnet for the instance.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
* ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ).
* ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ).
* ``vpc-id`` - The ID of the VPC that the instance is running in.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
:type NextToken: string
:param NextToken:
The token to request the next page of results.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 5
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
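# Hedged sketch (editorial addition): this waiter polls every 5 seconds (see
# the Delay default above), so it is a cheap guard against describe_instances
# racing a just-issued RunInstances call. The instance ID is hypothetical.
def _example_wait_instance_exists():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('instance_exists').wait(
        InstanceIds=['i-0123456789abcdef0'],
    )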
class InstanceRunning(Waiter):
def wait(self, Filters: List = None, InstanceIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_instances` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ).
* ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ).
* ``availability-zone`` - The Availability Zone of the instance.
* ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` .
* ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``block-device-mapping.volume-id`` - The volume ID of the EBS volume.
* ``client-token`` - The idempotency token you provided when you launched the instance.
* ``dns-name`` - The public DNS name of the instance.
* ``group-id`` - The ID of the security group for the instance. EC2-Classic only.
* ``group-name`` - The name of the security group for the instance. EC2-Classic only.
* ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation.
* ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable.
* ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ).
* ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN.
* ``image-id`` - The ID of the image used to launch the instance.
* ``instance-id`` - The ID of the instance.
* ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ).
* ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-type`` - The type of instance (for example, ``t2.micro`` ).
* ``instance.group-id`` - The ID of the security group for the instance.
* ``instance.group-name`` - The name of the security group for the instance.
* ``ip-address`` - The public IPv4 address of the instance.
* ``kernel-id`` - The kernel ID.
* ``key-name`` - The name of the key pair used when the instance was launched.
* ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
* ``launch-time`` - The time when the instance was launched.
* ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ).
* ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface.
* ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
* ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface.
* ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface.
* ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
* ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
* ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
* ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
* ``network-interface.attachment.attachment-id`` - The ID of the interface attachment.
* ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached.
* ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
* ``network-interface.attachment.device-index`` - The device index to which the network interface is attached.
* ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance.
* ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated.
* ``network-interface.availability-zone`` - The Availability Zone for the network interface.
* ``network-interface.description`` - The description of the network interface.
* ``network-interface.group-id`` - The ID of a security group associated with the network interface.
* ``network-interface.group-name`` - The name of a security group associated with the network interface.
* ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface.
* ``network-interface.mac-address`` - The MAC address of the network interface.
* ``network-interface.network-interface-id`` - The ID of the network interface.
* ``network-interface.owner-id`` - The ID of the owner of the network interface.
* ``network-interface.private-dns-name`` - The private DNS name of the network interface.
* ``network-interface.requester-id`` - The requester ID for the network interface.
* ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS.
* ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ).
* ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
* ``network-interface.subnet-id`` - The ID of the subnet for the network interface.
* ``network-interface.vpc-id`` - The ID of the VPC for the network interface.
* ``owner-id`` - The AWS account ID of the instance owner.
* ``placement-group-name`` - The name of the placement group for the instance.
* ``placement-partition-number`` - The partition in which the instance is located.
* ``platform`` - The platform. To list only Windows instances, use ``windows`` .
* ``private-dns-name`` - The private IPv4 DNS name of the instance.
* ``private-ip-address`` - The private IPv4 address of the instance.
* ``product-code`` - The product code associated with the AMI used to launch the instance.
* ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``reason`` - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.
* ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
* ``reservation-id`` - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC.
* ``spot-instance-request-id`` - The ID of the Spot Instance request.
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - A message that describes the state change.
* ``subnet-id`` - The ID of the subnet for the instance.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
* ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ).
* ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ).
* ``vpc-id`` - The ID of the VPC that the instance is running in.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
:type NextToken: string
:param NextToken:
The token to request the next page of results.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
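# Hedged sketch (editorial addition): launch one instance and block until it
# reaches the running state; the AMI ID and instance type are placeholders.
def _example_wait_instance_running():
    import boto3
    ec2 = boto3.client('ec2')
    resp = ec2.run_instances(ImageId='ami-0123456789abcdef0',
                             InstanceType='t2.micro', MinCount=1, MaxCount=1)
    instance_id = resp['Instances'][0]['InstanceId']
    ec2.get_waiter('instance_running').wait(InstanceIds=[instance_id])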
class InstanceStatusOk(Waiter):
def wait(self, Filters: List = None, InstanceIds: List = None, MaxResults: int = None, NextToken: str = None, DryRun: bool = None, IncludeAllInstances: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_instance_status` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
MaxResults=123,
NextToken='string',
DryRun=True|False,
IncludeAllInstances=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``availability-zone`` - The Availability Zone of the instance.
* ``event.code`` - The code for the scheduled event (``instance-reboot`` | ``system-reboot`` | ``system-maintenance`` | ``instance-retirement`` | ``instance-stop`` ).
* ``event.description`` - A description of the event.
* ``event.instance-event-id`` - The ID of the event whose date and time you are modifying.
* ``event.not-after`` - The latest end time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ).
* ``event.not-before`` - The earliest start time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ).
* ``event.not-before-deadline`` - The deadline for starting the event (for example, ``2014-09-15T17:15:20.000Z`` ).
* ``instance-state-code`` - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-status.reachability`` - Filters on instance status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ).
* ``instance-status.status`` - The status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ).
* ``system-status.reachability`` - Filters on system status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ).
* ``system-status.status`` - The system status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ).
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
Constraints: Maximum 100 explicitly specified instance IDs.
- *(string) --*
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
:type NextToken: string
:param NextToken:
The token to retrieve the next page of results.
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type IncludeAllInstances: boolean
:param IncludeAllInstances:
When ``true`` , includes the health status for all instances. When ``false`` , includes the health status for running instances only.
Default: ``false``
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
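# Hedged sketch (editorial addition): 'running' does not imply the OS has
# finished booting; waiting on instance_status_ok covers the reachability
# checks described above. The instance ID is a hypothetical placeholder.
def _example_wait_instance_status_ok():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('instance_status_ok').wait(
        InstanceIds=['i-0123456789abcdef0'],
    )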
class InstanceStopped(Waiter):
def wait(self, Filters: List = None, InstanceIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_instances` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ).
* ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ).
* ``availability-zone`` - The Availability Zone of the instance.
* ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` .
* ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``block-device-mapping.volume-id`` - The volume ID of the EBS volume.
* ``client-token`` - The idempotency token you provided when you launched the instance.
* ``dns-name`` - The public DNS name of the instance.
* ``group-id`` - The ID of the security group for the instance. EC2-Classic only.
* ``group-name`` - The name of the security group for the instance. EC2-Classic only.
* ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation.
* ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable.
* ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ).
* ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN.
* ``image-id`` - The ID of the image used to launch the instance.
* ``instance-id`` - The ID of the instance.
* ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ).
* ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-type`` - The type of instance (for example, ``t2.micro`` ).
* ``instance.group-id`` - The ID of the security group for the instance.
* ``instance.group-name`` - The name of the security group for the instance.
* ``ip-address`` - The public IPv4 address of the instance.
* ``kernel-id`` - The kernel ID.
* ``key-name`` - The name of the key pair used when the instance was launched.
* ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
* ``launch-time`` - The time when the instance was launched.
* ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ).
* ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface.
* ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
* ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface.
* ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface.
* ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
* ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
* ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
* ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
* ``network-interface.attachment.attachment-id`` - The ID of the interface attachment.
* ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached.
* ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
* ``network-interface.attachment.device-index`` - The device index to which the network interface is attached.
* ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance.
* ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated.
* ``network-interface.availability-zone`` - The Availability Zone for the network interface.
* ``network-interface.description`` - The description of the network interface.
* ``network-interface.group-id`` - The ID of a security group associated with the network interface.
* ``network-interface.group-name`` - The name of a security group associated with the network interface.
* ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface.
* ``network-interface.mac-address`` - The MAC address of the network interface.
* ``network-interface.network-interface-id`` - The ID of the network interface.
* ``network-interface.owner-id`` - The ID of the owner of the network interface.
* ``network-interface.private-dns-name`` - The private DNS name of the network interface.
* ``network-interface.requester-id`` - The requester ID for the network interface.
* ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS.
* ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ).
* ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
* ``network-interface.subnet-id`` - The ID of the subnet for the network interface.
* ``network-interface.vpc-id`` - The ID of the VPC for the network interface.
* ``owner-id`` - The AWS account ID of the instance owner.
* ``placement-group-name`` - The name of the placement group for the instance.
* ``placement-partition-number`` - The partition in which the instance is located.
* ``platform`` - The platform. To list only Windows instances, use ``windows`` .
* ``private-dns-name`` - The private IPv4 DNS name of the instance.
* ``private-ip-address`` - The private IPv4 address of the instance.
* ``product-code`` - The product code associated with the AMI used to launch the instance.
* ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``reason`` - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.
* ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
* ``reservation-id`` - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC.
* ``spot-instance-request-id`` - The ID of the Spot Instance request.
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - A message that describes the state change.
* ``subnet-id`` - The ID of the subnet for the instance.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
* ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ).
* ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ).
* ``vpc-id`` - The ID of the VPC that the instance is running in.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
:type NextToken: string
:param NextToken:
The token to request the next page of results.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
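# Hedged sketch (editorial addition): stop an instance and wait until it
# reaches 'stopped'; stopping can be slow, so the delay between polls is
# widened here. The instance ID is a hypothetical placeholder.
def _example_wait_instance_stopped():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.stop_instances(InstanceIds=['i-0123456789abcdef0'])
    ec2.get_waiter('instance_stopped').wait(
        InstanceIds=['i-0123456789abcdef0'],
        WaiterConfig={'Delay': 30, 'MaxAttempts': 40},
    )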
class InstanceTerminated(Waiter):
def wait(self, Filters: List = None, InstanceIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_instances` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ).
* ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ).
* ``availability-zone`` - The Availability Zone of the instance.
* ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` .
* ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination.
* ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``block-device-mapping.volume-id`` - The volume ID of the EBS volume.
* ``client-token`` - The idempotency token you provided when you launched the instance.
* ``dns-name`` - The public DNS name of the instance.
* ``group-id`` - The ID of the security group for the instance. EC2-Classic only.
* ``group-name`` - The name of the security group for the instance. EC2-Classic only.
* ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation.
* ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable.
* ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ).
* ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN.
* ``image-id`` - The ID of the image used to launch the instance.
* ``instance-id`` - The ID of the instance.
* ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ).
* ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-type`` - The type of instance (for example, ``t2.micro`` ).
* ``instance.group-id`` - The ID of the security group for the instance.
* ``instance.group-name`` - The name of the security group for the instance.
* ``ip-address`` - The public IPv4 address of the instance.
* ``kernel-id`` - The kernel ID.
* ``key-name`` - The name of the key pair used when the instance was launched.
* ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).
* ``launch-time`` - The time when the instance was launched.
* ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ).
* ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface.
* ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.
* ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface.
* ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface.
* ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
* ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
* ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
* ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
* ``network-interface.attachment.attachment-id`` - The ID of the interface attachment.
* ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached.
* ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
* ``network-interface.attachment.device-index`` - The device index to which the network interface is attached.
* ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance.
* ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated.
* ``network-interface.availability-zone`` - The Availability Zone for the network interface.
* ``network-interface.description`` - The description of the network interface.
* ``network-interface.group-id`` - The ID of a security group associated with the network interface.
* ``network-interface.group-name`` - The name of a security group associated with the network interface.
* ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface.
* ``network-interface.mac-address`` - The MAC address of the network interface.
* ``network-interface.network-interface-id`` - The ID of the network interface.
* ``network-interface.owner-id`` - The ID of the owner of the network interface.
* ``network-interface.private-dns-name`` - The private DNS name of the network interface.
* ``network-interface.requester-id`` - The requester ID for the network interface.
* ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS.
* ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ).
* ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
* ``network-interface.subnet-id`` - The ID of the subnet for the network interface.
* ``network-interface.vpc-id`` - The ID of the VPC for the network interface.
* ``owner-id`` - The AWS account ID of the instance owner.
* ``placement-group-name`` - The name of the placement group for the instance.
* ``placement-partition-number`` - The partition in which the instance is located.
* ``platform`` - The platform. To list only Windows instances, use ``windows`` .
* ``private-dns-name`` - The private IPv4 DNS name of the instance.
* ``private-ip-address`` - The private IPv4 address of the instance.
* ``product-code`` - The product code associated with the AMI used to launch the instance.
* ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ).
* ``ramdisk-id`` - The RAM disk ID.
* ``reason`` - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.
* ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
* ``reservation-id`` - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
* ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ).
* ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ).
* ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC.
* ``spot-instance-request-id`` - The ID of the Spot Instance request.
* ``state-reason-code`` - The reason code for the state change.
* ``state-reason-message`` - A message that describes the state change.
* ``subnet-id`` - The ID of the subnet for the instance.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
* ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ).
* ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ).
* ``vpc-id`` - The ID of the VPC that the instance is running in.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
:type NextToken: string
:param NextToken:
The token to request the next page of results.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
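# Usage sketch (hedged): in real code this waiter comes from a boto3 EC2
# client via get_waiter() rather than being instantiated directly. The
# instance ID below is a placeholder.
def _example_instance_terminated():
    import boto3
    ec2 = boto3.client('ec2')
    waiter = ec2.get_waiter('instance_terminated')
    # Polls describe_instances every 15 seconds, up to 40 attempts.
    waiter.wait(InstanceIds=['i-0123456789abcdef0'])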
class KeyPairExists(Waiter):
def wait(self, Filters: List = None, KeyNames: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_key_pairs` every 5 seconds until a successful state is reached. An error is returned after 6 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairs>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
KeyNames=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``fingerprint`` - The fingerprint of the key pair.
* ``key-name`` - The name of the key pair.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type KeyNames: list
:param KeyNames:
The key pair names.
Default: Describes all your key pairs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 5
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 6
:returns: None
"""
pass
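# Usage sketch (hedged; the key pair name is a placeholder). WaiterConfig
# overrides the defaults documented above (Delay: 5, MaxAttempts: 6).
def _example_key_pair_exists():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('key_pair_exists').wait(
        KeyNames=['my-key-pair'],
        WaiterConfig={'Delay': 2, 'MaxAttempts': 10})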
class NatGatewayAvailable(Waiter):
def wait(self, Filters: List = None, MaxResults: int = None, NatGatewayIds: List = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_nat_gateways` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGateways>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxResults=123,
NatGatewayIds=[
'string',
],
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``nat-gateway-id`` - The ID of the NAT gateway.
* ``state`` - The state of the NAT gateway (``pending`` | ``failed`` | ``available`` | ``deleting`` | ``deleted`` ).
* ``subnet-id`` - The ID of the subnet in which the NAT gateway resides.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-id`` - The ID of the VPC in which the NAT gateway resides.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value.
:type NatGatewayIds: list
:param NatGatewayIds:
One or more NAT gateway IDs.
- *(string) --*
:type NextToken: string
:param NextToken:
The token for the next page of results.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
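# Usage sketch (hedged; the VPC ID is a placeholder): instead of explicit
# NatGatewayIds, a filter can select every NAT gateway in one VPC.
def _example_nat_gateway_available():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('nat_gateway_available').wait(
        Filters=[{'Name': 'vpc-id', 'Values': ['vpc-0123456789abcdef0']}])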
class NetworkInterfaceAvailable(Waiter):
def wait(self, Filters: List = None, DryRun: bool = None, NetworkInterfaceIds: List = None, NextToken: str = None, MaxResults: int = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_network_interfaces` every 20 seconds until a successful state is reached. An error is returned after 10 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaces>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
DryRun=True|False,
NetworkInterfaceIds=[
'string',
],
NextToken='string',
MaxResults=123,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``addresses.private-ip-address`` - The private IPv4 addresses associated with the network interface.
* ``addresses.primary`` - Whether the private IPv4 address is the primary IP address associated with the network interface.
* ``addresses.association.public-ip`` - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).
* ``addresses.association.owner-id`` - The owner ID of the addresses associated with the network interface.
* ``association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address.
* ``association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
* ``association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface.
* ``association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface.
* ``association.public-dns-name`` - The public DNS name for the network interface (IPv4).
* ``attachment.attachment-id`` - The ID of the interface attachment.
* ``attachment.attach.time`` - The time that the network interface was attached to an instance.
* ``attachment.delete-on-termination`` - Indicates whether the attachment is deleted when an instance is terminated.
* ``attachment.device-index`` - The device index to which the network interface is attached.
* ``attachment.instance-id`` - The ID of the instance to which the network interface is attached.
* ``attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached.
* ``attachment.nat-gateway-id`` - The ID of the NAT gateway to which the network interface is attached.
* ``attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ).
* ``availability-zone`` - The Availability Zone of the network interface.
* ``description`` - The description of the network interface.
* ``group-id`` - The ID of a security group associated with the network interface.
* ``group-name`` - The name of a security group associated with the network interface.
* ``ipv6-addresses.ipv6-address`` - An IPv6 address associated with the network interface.
* ``mac-address`` - The MAC address of the network interface.
* ``network-interface-id`` - The ID of the network interface.
* ``owner-id`` - The AWS account ID of the network interface owner.
* ``private-ip-address`` - The private IPv4 address or addresses of the network interface.
* ``private-dns-name`` - The private DNS name of the network interface (IPv4).
* ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).
* ``requester-managed`` - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).
* ``source-dest-check`` - Indicates whether the network interface performs source/destination checking. A value of ``true`` means checking is enabled, and ``false`` means checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC.
* ``status`` - The status of the network interface. If the network interface is not attached to an instance, the status is ``available`` ; if it is attached to an instance, the status is ``in-use`` .
* ``subnet-id`` - The ID of the subnet for the network interface.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-id`` - The ID of the VPC for the network interface.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type NetworkInterfaceIds: list
:param NetworkInterfaceIds:
One or more network interface IDs.
Default: Describes all your network interfaces.
- *(string) --*
:type NextToken: string
:param NextToken:
The token to retrieve the next page of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 20
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 10
:returns: None
"""
pass
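# Usage sketch (hedged; the ENI ID is a placeholder): block until a detached
# network interface reports the ``available`` status.
def _example_network_interface_available():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('network_interface_available').wait(
        NetworkInterfaceIds=['eni-0123456789abcdef0'])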
class PasswordDataAvailable(Waiter):
def wait(self, InstanceId: str, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.get_password_data` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordData>`_
**Request Syntax**
::
waiter.wait(
InstanceId='string',
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type InstanceId: string
:param InstanceId: **[REQUIRED]**
The ID of the Windows instance.
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
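# Usage sketch (hedged; the instance ID is a placeholder): once the waiter
# returns, get_password_data yields the (still key-pair-encrypted) Windows
# administrator password.
def _example_password_data_available():
    import boto3
    ec2 = boto3.client('ec2')
    instance_id = 'i-0123456789abcdef0'
    ec2.get_waiter('password_data_available').wait(InstanceId=instance_id)
    print(ec2.get_password_data(InstanceId=instance_id)['PasswordData'])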
class SnapshotCompleted(Waiter):
def wait(self, Filters: List = None, MaxResults: int = None, NextToken: str = None, OwnerIds: List = None, RestorableByUserIds: List = None, SnapshotIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_snapshots` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshots>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxResults=123,
NextToken='string',
OwnerIds=[
'string',
],
RestorableByUserIds=[
'string',
],
SnapshotIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``description`` - A description of the snapshot.
* ``encrypted`` - Indicates whether the snapshot is encrypted (``true`` | ``false`` )
* ``owner-alias`` - Value from an Amazon-maintained list (``amazon`` | ``self`` | ``all`` | ``aws-marketplace`` | ``microsoft`` ) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.
* ``owner-id`` - The ID of the AWS account that owns the snapshot.
* ``progress`` - The progress of the snapshot, as a percentage (for example, 80%).
* ``snapshot-id`` - The snapshot ID.
* ``start-time`` - The time stamp when the snapshot was initiated.
* ``status`` - The status of the snapshot (``pending`` | ``completed`` | ``error`` ).
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``volume-id`` - The ID of the volume the snapshot is for.
* ``volume-size`` - The size of the volume, in GiB.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type MaxResults: integer
:param MaxResults:
The maximum number of snapshot results returned by ``DescribeSnapshots`` in paginated output. When this parameter is used, ``DescribeSnapshots`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeSnapshots`` request with the returned ``NextToken`` value. This value can be between 5 and 1000; if ``MaxResults`` is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then ``DescribeSnapshots`` returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.
:type NextToken: string
:param NextToken:
The ``NextToken`` value returned from a previous paginated ``DescribeSnapshots`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return.
:type OwnerIds: list
:param OwnerIds:
Describes the snapshots owned by these owners.
- *(string) --*
:type RestorableByUserIds: list
:param RestorableByUserIds:
The IDs of the AWS accounts that can create volumes from the snapshot.
- *(string) --*
:type SnapshotIds: list
:param SnapshotIds:
The snapshot IDs.
Default: Describes the snapshots for which you have create volume permissions.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
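# Usage sketch (hedged; the snapshot ID is a placeholder): WaiterError is
# raised if the 40 checks are exhausted before the snapshot completes.
def _example_snapshot_completed():
    import boto3
    from botocore.exceptions import WaiterError
    ec2 = boto3.client('ec2')
    try:
        ec2.get_waiter('snapshot_completed').wait(
            SnapshotIds=['snap-0123456789abcdef0'])
    except WaiterError as err:
        print('snapshot did not complete in time:', err)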
class SpotInstanceRequestFulfilled(Waiter):
def wait(self, Filters: List = None, DryRun: bool = None, SpotInstanceRequestIds: List = None, NextToken: str = None, MaxResults: int = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_spot_instance_requests` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequests>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
DryRun=True|False,
SpotInstanceRequestIds=[
'string',
],
NextToken='string',
MaxResults=123,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``availability-zone-group`` - The Availability Zone group.
* ``create-time`` - The time stamp when the Spot Instance request was created.
* ``fault-code`` - The fault code related to the request.
* ``fault-message`` - The fault message related to the request.
* ``instance-id`` - The ID of the instance that fulfilled the request.
* ``launch-group`` - The Spot Instance launch group.
* ``launch.block-device-mapping.delete-on-termination`` - Indicates whether the EBS volume is deleted on instance termination.
* ``launch.block-device-mapping.device-name`` - The device name for the volume in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ).
* ``launch.block-device-mapping.snapshot-id`` - The ID of the snapshot for the EBS volume.
* ``launch.block-device-mapping.volume-size`` - The size of the EBS volume, in GiB.
* ``launch.block-device-mapping.volume-type`` - The type of EBS volume: ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic.
* ``launch.group-id`` - The ID of the security group for the instance.
* ``launch.group-name`` - The name of the security group for the instance.
* ``launch.image-id`` - The ID of the AMI.
* ``launch.instance-type`` - The type of instance (for example, ``m3.medium`` ).
* ``launch.kernel-id`` - The kernel ID.
* ``launch.key-name`` - The name of the key pair the instance launched with.
* ``launch.monitoring-enabled`` - Whether detailed monitoring is enabled for the Spot Instance.
* ``launch.ramdisk-id`` - The RAM disk ID.
* ``launched-availability-zone`` - The Availability Zone in which the request is launched.
* ``network-interface.addresses.primary`` - Indicates whether the IP address is the primary private IP address.
* ``network-interface.delete-on-termination`` - Indicates whether the network interface is deleted when the instance is terminated.
* ``network-interface.description`` - A description of the network interface.
* ``network-interface.device-index`` - The index of the device for the network interface attachment on the instance.
* ``network-interface.group-id`` - The ID of the security group associated with the network interface.
* ``network-interface.network-interface-id`` - The ID of the network interface.
* ``network-interface.private-ip-address`` - The primary private IP address of the network interface.
* ``network-interface.subnet-id`` - The ID of the subnet for the instance.
* ``product-description`` - The product description associated with the instance (``Linux/UNIX`` | ``Windows`` ).
* ``spot-instance-request-id`` - The Spot Instance request ID.
* ``spot-price`` - The maximum hourly price for any Spot Instance launched to fulfill the request.
* ``state`` - The state of the Spot Instance request (``open`` | ``active`` | ``closed`` | ``cancelled`` | ``failed`` ). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see `Spot Request Status <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html>`__ in the *Amazon EC2 User Guide for Linux Instances* .
* ``status-code`` - The short code describing the most recent evaluation of your Spot Instance request.
* ``status-message`` - The message explaining the status of the Spot Instance request.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``type`` - The type of Spot Instance request (``one-time`` | ``persistent`` ).
* ``valid-from`` - The start date of the request.
* ``valid-until`` - The end date of the request.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type SpotInstanceRequestIds: list
:param SpotInstanceRequestIds:
One or more Spot Instance request IDs.
- *(string) --*
:type NextToken: string
:param NextToken:
The token to request the next set of results. This value is ``null`` when there are no more results to return.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. Specify a value between 5 and 1000. To retrieve the remaining results, make another call with the returned ``NextToken`` value.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
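# Usage sketch (hedged): request_spot_instances returns the request IDs to
# wait on; the AMI ID and pairing shown here are illustrative assumptions,
# not part of this stub.
def _example_spot_request_fulfilled():
    import boto3
    ec2 = boto3.client('ec2')
    resp = ec2.request_spot_instances(
        LaunchSpecification={'ImageId': 'ami-0123456789abcdef0',
                             'InstanceType': 't3.micro'})
    ids = [r['SpotInstanceRequestId'] for r in resp['SpotInstanceRequests']]
    ec2.get_waiter('spot_instance_request_fulfilled').wait(
        SpotInstanceRequestIds=ids)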
class SubnetAvailable(Waiter):
def wait(self, Filters: List = None, SubnetIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_subnets` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnets>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
SubnetIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``availability-zone`` - The Availability Zone for the subnet. You can also use ``availabilityZone`` as the filter name.
* ``availability-zone-id`` - The ID of the Availability Zone for the subnet. You can also use ``availabilityZoneId`` as the filter name.
* ``available-ip-address-count`` - The number of IPv4 addresses in the subnet that are available.
* ``cidr-block`` - The IPv4 CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use ``cidr`` or ``cidrBlock`` as the filter names.
* ``default-for-az`` - Indicates whether this is the default subnet for the Availability Zone. You can also use ``defaultForAz`` as the filter name.
* ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the subnet.
* ``ipv6-cidr-block-association.association-id`` - An association ID for an IPv6 CIDR block associated with the subnet.
* ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the subnet.
* ``owner-id`` - The ID of the AWS account that owns the subnet.
* ``state`` - The state of the subnet (``pending`` | ``available`` ).
* ``subnet-arn`` - The Amazon Resource Name (ARN) of the subnet.
* ``subnet-id`` - The ID of the subnet.
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-id`` - The ID of the VPC for the subnet.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type SubnetIds: list
:param SubnetIds:
One or more subnet IDs.
Default: Describes all your subnets.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
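# Usage sketch (hedged; the VPC ID and CIDR are placeholders): create a
# subnet, then block until it leaves the ``pending`` state.
def _example_subnet_available():
    import boto3
    ec2 = boto3.client('ec2')
    subnet = ec2.create_subnet(VpcId='vpc-0123456789abcdef0',
                               CidrBlock='10.0.1.0/24')
    ec2.get_waiter('subnet_available').wait(
        SubnetIds=[subnet['Subnet']['SubnetId']])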
class SystemStatusOk(Waiter):
def wait(self, Filters: List = None, InstanceIds: List = None, MaxResults: int = None, NextToken: str = None, DryRun: bool = None, IncludeAllInstances: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_instance_status` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
InstanceIds=[
'string',
],
MaxResults=123,
NextToken='string',
DryRun=True|False,
IncludeAllInstances=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``availability-zone`` - The Availability Zone of the instance.
* ``event.code`` - The code for the scheduled event (``instance-reboot`` | ``system-reboot`` | ``system-maintenance`` | ``instance-retirement`` | ``instance-stop`` ).
* ``event.description`` - A description of the event.
* ``event.instance-event-id`` - The ID of the event whose date and time you are modifying.
* ``event.not-after`` - The latest end time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ).
* ``event.not-before`` - The earliest start time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ).
* ``event.not-before-deadline`` - The deadline for starting the event (for example, ``2014-09-15T17:15:20.000Z`` ).
* ``instance-state-code`` - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
* ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ).
* ``instance-status.reachability`` - Filters on instance status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ).
* ``instance-status.status`` - The status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ).
* ``system-status.reachability`` - Filters on system status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ).
* ``system-status.status`` - The system status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ).
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type InstanceIds: list
:param InstanceIds:
The instance IDs.
Default: Describes all your instances.
Constraints: Maximum 100 explicitly specified instance IDs.
- *(string) --*
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
:type NextToken: string
:param NextToken:
The token to retrieve the next page of results.
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type IncludeAllInstances: boolean
:param IncludeAllInstances:
When ``true`` , includes the health status for all instances. When ``false`` , includes the health status for running instances only.
Default: ``false``
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
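# Usage sketch (hedged; the instance ID is a placeholder):
# IncludeAllInstances=True also reports instances that are not running.
def _example_system_status_ok():
    import boto3
    ec2 = boto3.client('ec2')
    ec2.get_waiter('system_status_ok').wait(
        InstanceIds=['i-0123456789abcdef0'],
        IncludeAllInstances=True)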
class VolumeAvailable(Waiter):
def wait(self, Filters: List = None, VolumeIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_volumes` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VolumeIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``attachment.attach-time`` - The time stamp when the attachment initiated.
* ``attachment.delete-on-termination`` - Whether the volume is deleted on instance termination.
* ``attachment.device`` - The device name specified in the block device mapping (for example, ``/dev/sda1`` ).
* ``attachment.instance-id`` - The ID of the instance the volume is attached to.
* ``attachment.status`` - The attachment state (``attaching`` | ``attached`` | ``detaching`` ).
* ``availability-zone`` - The Availability Zone in which the volume was created.
* ``create-time`` - The time stamp when the volume was created.
* ``encrypted`` - Indicates whether the volume is encrypted (``true`` | ``false`` )
* ``size`` - The size of the volume, in GiB.
* ``snapshot-id`` - The snapshot from which the volume was created.
* ``status`` - The status of the volume (``creating`` | ``available`` | ``in-use`` | ``deleting`` | ``deleted`` | ``error`` ).
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``volume-id`` - The volume ID.
* ``volume-type`` - The Amazon EBS volume type. This can be ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic volumes.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VolumeIds: list
:param VolumeIds:
The volume IDs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of volume results returned by ``DescribeVolumes`` in paginated output. When this parameter is used, ``DescribeVolumes`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeVolumes`` request with the returned ``NextToken`` value. This value can be between 5 and 500; if ``MaxResults`` is given a value larger than 500, only 500 results are returned. If this parameter is not used, then ``DescribeVolumes`` returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.
:type NextToken: string
:param NextToken:
The ``NextToken`` value returned from a previous paginated ``DescribeVolumes`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
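# Usage sketch (hedged; the Availability Zone is a placeholder): wait for a
# freshly created volume to leave the ``creating`` state.
def _example_volume_available():
    import boto3
    ec2 = boto3.client('ec2')
    vol = ec2.create_volume(AvailabilityZone='us-east-1a', Size=8)
    ec2.get_waiter('volume_available').wait(VolumeIds=[vol['VolumeId']])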
class VolumeDeleted(Waiter):
def wait(self, Filters: List = None, VolumeIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_volumes` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VolumeIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``attachment.attach-time`` - The time stamp when the attachment initiated.
* ``attachment.delete-on-termination`` - Whether the volume is deleted on instance termination.
* ``attachment.device`` - The device name specified in the block device mapping (for example, ``/dev/sda1`` ).
* ``attachment.instance-id`` - The ID of the instance the volume is attached to.
* ``attachment.status`` - The attachment state (``attaching`` | ``attached`` | ``detaching`` ).
* ``availability-zone`` - The Availability Zone in which the volume was created.
* ``create-time`` - The time stamp when the volume was created.
* ``encrypted`` - Indicates whether the volume is encrypted (``true`` | ``false`` )
* ``size`` - The size of the volume, in GiB.
* ``snapshot-id`` - The snapshot from which the volume was created.
* ``status`` - The status of the volume (``creating`` | ``available`` | ``in-use`` | ``deleting`` | ``deleted`` | ``error`` ).
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``volume-id`` - The volume ID.
* ``volume-type`` - The Amazon EBS volume type. This can be ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic volumes.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VolumeIds: list
:param VolumeIds:
The volume IDs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of volume results returned by ``DescribeVolumes`` in paginated output. When this parameter is used, ``DescribeVolumes`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeVolumes`` request with the returned ``NextToken`` value. This value can be between 5 and 500; if ``MaxResults`` is given a value larger than 500, only 500 results are returned. If this parameter is not used, then ``DescribeVolumes`` returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.
:type NextToken: string
:param NextToken:
The ``NextToken`` value returned from a previous paginated ``DescribeVolumes`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
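# Usage sketch (hedged; the volume ID is a placeholder). In botocore's
# waiter model this waiter also treats an InvalidVolume.NotFound error as
# success, so waiting on an already-gone volume returns immediately.
def _example_volume_deleted():
    import boto3
    ec2 = boto3.client('ec2')
    volume_id = 'vol-0123456789abcdef0'
    ec2.delete_volume(VolumeId=volume_id)
    ec2.get_waiter('volume_deleted').wait(VolumeIds=[volume_id])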
class VolumeInUse(Waiter):
def wait(self, Filters: List = None, VolumeIds: List = None, DryRun: bool = None, MaxResults: int = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_volumes` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VolumeIds=[
'string',
],
DryRun=True|False,
MaxResults=123,
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
The filters.
* ``attachment.attach-time`` - The time stamp when the attachment initiated.
* ``attachment.delete-on-termination`` - Whether the volume is deleted on instance termination.
* ``attachment.device`` - The device name specified in the block device mapping (for example, ``/dev/sda1`` ).
* ``attachment.instance-id`` - The ID of the instance the volume is attached to.
* ``attachment.status`` - The attachment state (``attaching`` | ``attached`` | ``detaching`` ).
* ``availability-zone`` - The Availability Zone in which the volume was created.
* ``create-time`` - The time stamp when the volume was created.
* ``encrypted`` - Indicates whether the volume is encrypted (``true`` | ``false`` )
* ``size`` - The size of the volume, in GiB.
* ``snapshot-id`` - The snapshot from which the volume was created.
* ``status`` - The status of the volume (``creating`` | ``available`` | ``in-use`` | ``deleting`` | ``deleted`` | ``error`` ).
* ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``volume-id`` - The volume ID.
* ``volume-type`` - The Amazon EBS volume type. This can be ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic volumes.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VolumeIds: list
:param VolumeIds:
The volume IDs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type MaxResults: integer
:param MaxResults:
The maximum number of volume results returned by ``DescribeVolumes`` in paginated output. When this parameter is used, ``DescribeVolumes`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeVolumes`` request with the returned ``NextToken`` value. This value can be between 5 and 500; if ``MaxResults`` is given a value larger than 500, only 500 results are returned. If this parameter is not used, then ``DescribeVolumes`` returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.
:type NextToken: string
:param NextToken:
The ``NextToken`` value returned from a previous paginated ``DescribeVolumes`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class VpcAvailable(Waiter):
def wait(self, Filters: List = None, VpcIds: List = None, DryRun: bool = None, NextToken: str = None, MaxResults: int = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_vpcs` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VpcIds=[
'string',
],
DryRun=True|False,
NextToken='string',
MaxResults=123,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``cidr`` - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC\'s CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, ``/28`` ).
* ``cidr-block-association.cidr-block`` - An IPv4 CIDR block associated with the VPC.
* ``cidr-block-association.association-id`` - The association ID for an IPv4 CIDR block associated with the VPC.
* ``cidr-block-association.state`` - The state of an IPv4 CIDR block associated with the VPC.
* ``dhcp-options-id`` - The ID of a set of DHCP options.
* ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the VPC.
* ``ipv6-cidr-block-association.association-id`` - The association ID for an IPv6 CIDR block associated with the VPC.
* ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the VPC.
* ``isDefault`` - Indicates whether the VPC is the default VPC.
* ``owner-id`` - The ID of the AWS account that owns the VPC.
* ``state`` - The state of the VPC (``pending`` | ``available`` ).
* ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-id`` - The ID of the VPC.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VpcIds: list
:param VpcIds:
One or more VPC IDs.
Default: Describes all your VPCs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type NextToken: string
:param NextToken:
The token for the next page of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
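# Illustrative usage (not part of the generated stub): at runtime these waiter
# classes are obtained by name from a boto3 EC2 client rather than instantiated
# directly. The snake_case waiter name follows the class name by boto3
# convention; the VPC ID below is a placeholder.
#
#     import boto3
#     ec2 = boto3.client('ec2')
#     waiter = ec2.get_waiter('vpc_available')
#     waiter.wait(VpcIds=['vpc-12345678'],
#                 WaiterConfig={'Delay': 15, 'MaxAttempts': 40})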
class VpcExists(Waiter):
def wait(self, Filters: List = None, VpcIds: List = None, DryRun: bool = None, NextToken: str = None, MaxResults: int = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_vpcs` every 1 second until a successful state is reached. An error is returned after 5 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VpcIds=[
'string',
],
DryRun=True|False,
NextToken='string',
MaxResults=123,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``cidr`` - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC\'s CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, ``/28`` ).
* ``cidr-block-association.cidr-block`` - An IPv4 CIDR block associated with the VPC.
* ``cidr-block-association.association-id`` - The association ID for an IPv4 CIDR block associated with the VPC.
* ``cidr-block-association.state`` - The state of an IPv4 CIDR block associated with the VPC.
* ``dhcp-options-id`` - The ID of a set of DHCP options.
* ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the VPC.
* ``ipv6-cidr-block-association.association-id`` - The association ID for an IPv6 CIDR block associated with the VPC.
* ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the VPC.
* ``isDefault`` - Indicates whether the VPC is the default VPC.
* ``owner-id`` - The ID of the AWS account that owns the VPC.
* ``state`` - The state of the VPC (``pending`` | ``available`` ).
* ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-id`` - The ID of the VPC.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VpcIds: list
:param VpcIds:
One or more VPC IDs.
Default: Describes all your VPCs.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type NextToken: string
:param NextToken:
The token for the next page of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 1
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 5
:returns: None
"""
pass
class VpcPeeringConnectionDeleted(Waiter):
def wait(self, Filters: List = None, DryRun: bool = None, VpcPeeringConnectionIds: List = None, NextToken: str = None, MaxResults: int = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_vpc_peering_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
DryRun=True|False,
VpcPeeringConnectionIds=[
'string',
],
NextToken='string',
MaxResults=123,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``accepter-vpc-info.cidr-block`` - The IPv4 CIDR block of the accepter VPC.
* ``accepter-vpc-info.owner-id`` - The AWS account ID of the owner of the accepter VPC.
* ``accepter-vpc-info.vpc-id`` - The ID of the accepter VPC.
* ``expiration-time`` - The expiration date and time for the VPC peering connection.
* ``requester-vpc-info.cidr-block`` - The IPv4 CIDR block of the requester\'s VPC.
* ``requester-vpc-info.owner-id`` - The AWS account ID of the owner of the requester VPC.
* ``requester-vpc-info.vpc-id`` - The ID of the requester VPC.
* ``status-code`` - The status of the VPC peering connection (``pending-acceptance`` | ``failed`` | ``expired`` | ``provisioning`` | ``active`` | ``deleting`` | ``deleted`` | ``rejected`` ).
* ``status-message`` - A message that provides more information about the status of the VPC peering connection, if applicable.
* ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-peering-connection-id`` - The ID of the VPC peering connection.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type VpcPeeringConnectionIds: list
:param VpcPeeringConnectionIds:
One or more VPC peering connection IDs.
Default: Describes all your VPC peering connections.
- *(string) --*
:type NextToken: string
:param NextToken:
The token for the next page of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class VpcPeeringConnectionExists(Waiter):
def wait(self, Filters: List = None, DryRun: bool = None, VpcPeeringConnectionIds: List = None, NextToken: str = None, MaxResults: int = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_vpc_peering_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
DryRun=True|False,
VpcPeeringConnectionIds=[
'string',
],
NextToken='string',
MaxResults=123,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``accepter-vpc-info.cidr-block`` - The IPv4 CIDR block of the accepter VPC.
* ``accepter-vpc-info.owner-id`` - The AWS account ID of the owner of the accepter VPC.
* ``accepter-vpc-info.vpc-id`` - The ID of the accepter VPC.
* ``expiration-time`` - The expiration date and time for the VPC peering connection.
* ``requester-vpc-info.cidr-block`` - The IPv4 CIDR block of the requester\'s VPC.
* ``requester-vpc-info.owner-id`` - The AWS account ID of the owner of the requester VPC.
* ``requester-vpc-info.vpc-id`` - The ID of the requester VPC.
* ``status-code`` - The status of the VPC peering connection (``pending-acceptance`` | ``failed`` | ``expired`` | ``provisioning`` | ``active`` | ``deleting`` | ``deleted`` | ``rejected`` ).
* ``status-message`` - A message that provides more information about the status of the VPC peering connection, if applicable.
* ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``vpc-peering-connection-id`` - The ID of the VPC peering connection.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type VpcPeeringConnectionIds: list
:param VpcPeeringConnectionIds:
One or more VPC peering connection IDs.
Default: Describes all your VPC peering connections.
- *(string) --*
:type NextToken: string
:param NextToken:
The token for the next page of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class VpnConnectionAvailable(Waiter):
def wait(self, Filters: List = None, VpnConnectionIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_vpn_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VpnConnectionIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``customer-gateway-configuration`` - The configuration information for the customer gateway.
* ``customer-gateway-id`` - The ID of a customer gateway associated with the VPN connection.
* ``state`` - The state of the VPN connection (``pending`` | ``available`` | ``deleting`` | ``deleted`` ).
* ``option.static-routes-only`` - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).
* ``route.destination-cidr-block`` - The destination CIDR block. This corresponds to the subnet used in a customer data center.
* ``bgp-asn`` - The BGP Autonomous System Number (ASN) associated with a BGP device.
* ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``type`` - The type of VPN connection. Currently the only supported type is ``ipsec.1`` .
* ``vpn-connection-id`` - The ID of the VPN connection.
* ``vpn-gateway-id`` - The ID of a virtual private gateway associated with the VPN connection.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VpnConnectionIds: list
:param VpnConnectionIds:
One or more VPN connection IDs.
Default: Describes your VPN connections.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
class VpnConnectionDeleted(Waiter):
def wait(self, Filters: List = None, VpnConnectionIds: List = None, DryRun: bool = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`EC2.Client.describe_vpn_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections>`_
**Request Syntax**
::
waiter.wait(
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
VpnConnectionIds=[
'string',
],
DryRun=True|False,
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Filters: list
:param Filters:
One or more filters.
* ``customer-gateway-configuration`` - The configuration information for the customer gateway.
* ``customer-gateway-id`` - The ID of a customer gateway associated with the VPN connection.
* ``state`` - The state of the VPN connection (``pending`` | ``available`` | ``deleting`` | ``deleted`` ).
* ``option.static-routes-only`` - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).
* ``route.destination-cidr-block`` - The destination CIDR block. This corresponds to the subnet used in a customer data center.
* ``bgp-asn`` - The BGP Autonomous System Number (ASN) associated with a BGP device.
* ``tag``:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value.
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
* ``type`` - The type of VPN connection. Currently the only supported type is ``ipsec.1`` .
* ``vpn-connection-id`` - The ID of the VPN connection.
* ``vpn-gateway-id`` - The ID of a virtual private gateway associated with the VPN connection.
- *(dict) --*
A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
* DescribeAvailabilityZones
* DescribeImages
* DescribeInstances
* DescribeKeyPairs
* DescribeSecurityGroups
* DescribeSnapshots
* DescribeSubnets
* DescribeTags
* DescribeVolumes
* DescribeVpcs
- **Name** *(string) --*
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --*
The filter values. Filter values are case-sensitive.
- *(string) --*
:type VpnConnectionIds: list
:param VpnConnectionIds:
One or more VPN connection IDs.
Default: Describes your VPN connections.
- *(string) --*
:type DryRun: boolean
:param DryRun:
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` .
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 15
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 40
:returns: None
"""
pass
|
tests/schedules/test_adjustments.py | nicolasiltis/prefect | 8,633 | 12654256 | <reponame>nicolasiltis/prefect
from datetime import timedelta
import pendulum
import pytest
import prefect.schedules.adjustments as adjustments
import prefect.schedules.filters
@pytest.mark.parametrize(
"interval",
[
timedelta(days=1),
timedelta(seconds=0),
timedelta(days=-1),
timedelta(microseconds=1),
],
)
def test_add(interval):
dt = pendulum.now()
adjustment_fn = adjustments.add(interval)
assert adjustment_fn(dt) == dt + interval
@pytest.mark.parametrize("dt", [pendulum.datetime(2019, 1, i) for i in range(1, 10)])
def test_next_weekday(dt):
adjusted = adjustments.next_weekday(dt)
if prefect.schedules.filters.is_weekday(dt):
assert adjusted is dt
else:
assert adjusted > dt and adjusted.weekday() == 0
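# Behavior illustrated by the tests above (dates are illustrative):
#   adjustments.add(timedelta(days=1))(pendulum.datetime(2019, 1, 4))
#       # -> 2019-01-05T00:00:00+00:00
#   adjustments.next_weekday(pendulum.datetime(2019, 1, 5))  # a Saturday
#       # -> 2019-01-07T00:00:00+00:00, the following Monday (weekday() == 0)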
|
samples/client/petstore/python/tests/test_configuration.py | MalcolmScoffable/openapi-generator | 11,868 | 12654260 | <reponame>MalcolmScoffable/openapi-generator
# coding: utf-8
# flake8: noqa
"""
Run the tests.
$ pip install nose (optional)
$ cd petstore_api-python
$ nosetests -v
"""
from __future__ import absolute_import
import unittest
import petstore_api
class TestConfiguration(unittest.TestCase):
"""Animal unit test stubs"""
def setUp(self):
pass
def tearDown(self):
# reset Configuration
petstore_api.Configuration.set_default(None)
def testConfiguration(self):
# check that different instances use different dictionaries
c1 = petstore_api.Configuration()
c2 = petstore_api.Configuration()
self.assertNotEqual(id(c1.api_key), id(c2.api_key))
self.assertNotEqual(id(c1.api_key_prefix), id(c2.api_key_prefix))
def testDefaultConfiguration(self):
# prepare default configuration
c1 = petstore_api.Configuration(host="example.com")
c1.debug = True
petstore_api.Configuration.set_default(c1)
# get default configuration
c2 = petstore_api.Configuration.get_default_copy()
self.assertEqual(c2.host, "example.com")
self.assertTrue(c2.debug)
self.assertNotEqual(id(c1.api_key), id(c2.api_key))
self.assertNotEqual(id(c1.api_key_prefix), id(c2.api_key_prefix))
if __name__ == '__main__':
unittest.main()
|
krsh/config/__init__.py | riiid/krsh | 133 | 12654280 | <reponame>riiid/krsh
# Copyright 2021 AIOps Squad, Riiid Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .config import PROJECT_CONFIG_FNAME
from .config import PIPELINE_CONFIG_FNAME
from .config import ProjectConfig
from .config import PipelineConfig
from .config import get_project_config
from .config import get_pipeline_config
__all__ = [
"PROJECT_CONFIG_FNAME",
"PIPELINE_CONFIG_FNAME",
"ProjectConfig",
"PipelineConfig",
"get_project_config",
"get_pipeline_config",
]
|
tests/bento_service_examples/local_dependencies/local_module/__init__.py | co42/BentoML | 3,451 | 12654293 | <reponame>co42/BentoML
def dependency_in_local_module_directory(foo):
return foo
|
src/test-apps/happy/bin/weave-bdx.py | robszewczyk/openweave-core | 249 | 12654304 | <reponame>robszewczyk/openweave-core
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# A Happy command line utility that tests Weave BDX among Weave nodes.
#
# The command is executed by instantiating and running the WeaveBDX class.
#
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import set_test_path
from happy.Utils import *
import WeaveBDX
if __name__ == "__main__":
options = WeaveBDX.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "hqs:c:t:r:u:d:o:l:p:S:C:",
["help", "quiet", "server=", "client=", "tmp=", "receive=","upload=",
"download=", "offset=", "length=", "tap=", "server-version=", "client-version="])
except getopt.GetoptError as err:
print(WeaveBDX.WeaveBDX.__doc__)
print(hred(str(err)))
sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
print(WeaveBDX.WeaveBDX.__doc__)
sys.exit(0)
elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-s", "--server"):
options["server"] = a
elif o in ("-c", "--client"):
options["client"] = a
elif o in ("-t", "--tmp"):
options["tmp"] = a
elif o in ("-r", "--receive"):
options["receive"] = a
elif o in ("-u", "--upload"):
options["upload"] = a
elif o in ("-d", "--download"):
options["download"] = a
elif o in ("-o", "--offset"):
options["offset"] = a
elif o in ("-l", "--length"):
options["length"] = a
elif o in ("-p", "--tap"):
options["tap"] = a
elif o in ("-S", "--server-version"):
options["server_version"] = int(a)
elif o in ("-C", "--client-version"):
options["client_version"] = int(a)
else:
assert False, "unhandled option"
cmd = WeaveBDX.WeaveBDX(options)
cmd.start()
|
spytest/apis/common/redis.py | shubav/sonic-mgmt | 132 | 12654316 | import re
import json
from spytest import st
APPL_DB = "APPL_DB"
ASIC_DB = "ASIC_DB"
COUNTERS_DB = "COUNTERS_DB"
LOGLEVEL_DB = "LOGLEVEL_DB"
CONFIG_DB = "CONFIG_DB"
PFC_WD_DB = "PFC_WD_DB"
FLEX_COUNTER_DB = "FLEX_COUNTER_DB"
STATE_DB = "STATE_DB"
SNMP_OVERLAY_DB = "SNMP_OVERLAY_DB"
ERROR_DB = "ERROR_DB"
########################## TODO ####################################
# read db_port_map from /var/run/redis/sonic-db/database_config.json
####################################################################
db_id_map = {
APPL_DB: 0,
ASIC_DB: 1,
COUNTERS_DB: 2,
LOGLEVEL_DB: 3,
CONFIG_DB: 4,
PFC_WD_DB: 5,
FLEX_COUNTER_DB: 5,
STATE_DB: 6,
SNMP_OVERLAY_DB: 7,
ERROR_DB: 8
}
# Port map used for A/A+/B/B-MR
db_default_port_map = {
APPL_DB: 6379,
ASIC_DB: 6379,
COUNTERS_DB: 6379,
LOGLEVEL_DB: 6379,
CONFIG_DB: 6379,
PFC_WD_DB: 6379,
FLEX_COUNTER_DB: 6379,
STATE_DB: 6379,
SNMP_OVERLAY_DB: 6379,
ERROR_DB: 6379
}
# Read /var/run/redis/sonic-db/database_config.json on DUT and populate db_port_map
db_port_map = {}
# 0 - use redis-cli
# 1 - use redis-cli -p
# 2 - use sonic-db-cli
def db_cli_init(dut):
    db_map_read(dut)
    db_cli = st.getenv("SPYTEST_REDIS_DB_CLI_TYPE", "1")
if db_cli in ["0", "1", "2"]: return db_cli
output = st.show(dut,'ls /usr/local/bin/sonic-db-cli',skip_tmpl=True)
return "0" if re.search(r'No such file or directory',output) else "2"
def db_map_read(dut):
global db_port_map
db_dict = None
db_json = st.config(dut, "cat /var/run/redis/sonic-db/database_config.json").split("\n")
db_json.pop()
try:
db_dict = json.loads("".join(db_json))
db_instances = db_dict.get("INSTANCES")
for db_name, db_data in db_dict.get("DATABASES").items():
db_port_map[db_name] = db_instances[db_data["instance"]].get("port")
except Exception:
db_port_map = db_default_port_map
def _prefix(dut, db, suffix="cli"):
db_cli = st.get_dut_var(dut, "redis_db_cli")
if db and db not in db_id_map:
raise ValueError("Unknown DB name {} in ID Map".format(db))
if db and db not in db_port_map:
raise ValueError("Unknown DB name {} in Port Map".format(db))
if db_cli == "2":
return "sonic-db-{} {}".format(suffix, db or "")
if db_cli == "1":
cmd = "redis-{} -p {}".format(suffix, db_port_map[db])
else:
cmd = "redis-{}".format(suffix)
return "{} -n {}".format(cmd, db_id_map[db]) if db else cmd
def scan(dut, db, pattern, skip_error_check=False):
cmd="{} --scan --pattern '{}'".format(_prefix(dut, db), pattern)
return st.config(dut, cmd, skip_error_check=skip_error_check)
def dump(dut, db, pattern, skip_error_check=False):
cmd="{} -k '{}' -y".format(_prefix(dut, db, "dump"), pattern)
return st.config(dut, cmd, skip_error_check=skip_error_check)
def build(dut, db, cmd):
return "{} {}".format(_prefix(dut, db), cmd)
def config(dut, db, cmd, skip_error_check=False):
dev_cmd = build(dut, db, cmd)
return st.config(dut, dev_cmd, skip_error_check=skip_error_check)
def show(dut, db, cmd, skip_tmpl=False):
dev_cmd = build(dut, db, cmd)
return st.show(dut, dev_cmd, skip_tmpl=skip_tmpl)
|
note5/code/cnn.py | fluffyrita/LearnPaddle | 367 | 12654323 | # coding:utf-8
import paddle.v2 as paddle
# Convolutional neural network LeNet-5; builds and returns the classifier
def convolutional_neural_network(datadim, type_size):
image = paddle.layer.data(name="image",
type=paddle.data_type.dense_vector(datadim))
    # First convolution-pooling layer
conv_pool_1 = paddle.networks.simple_img_conv_pool(input=image,
filter_size=5,
num_filters=20,
num_channel=1,
pool_size=2,
pool_stride=2,
act=paddle.activation.Relu())
# 第二个卷积--池化层
conv_pool_2 = paddle.networks.simple_img_conv_pool(input=conv_pool_1,
filter_size=5,
num_filters=50,
num_channel=20,
pool_size=2,
pool_stride=2,
act=paddle.activation.Relu())
    # Fully connected output layer with softmax as the activation function
out = paddle.layer.fc(input=conv_pool_2,
size=type_size,
act=paddle.activation.Softmax())
return out
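# Usage sketch (illustrative): for 28x28 single-channel images such as MNIST
# with 10 classes, the classifier would be built as:
#   predict = convolutional_neural_network(datadim=1 * 28 * 28, type_size=10)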
|
sample models/Bank, 3 clerks (resources).py | akharitonov/salabim | 151 | 12654334 | # Bank, 3 clerks (resources).py
import salabim as sim
class CustomerGenerator(sim.Component):
def process(self):
while True:
Customer()
yield self.hold(sim.Uniform(5, 15).sample())
class Customer(sim.Component):
def process(self):
yield self.request(clerks)
yield self.hold(30)
self.release() # not really required
env = sim.Environment(trace=False)
CustomerGenerator()
clerks = sim.Resource("clerks", capacity=3)
env.run(till=50000)
clerks.print_statistics()
clerks.print_info()
|
underworld/libUnderworld/configure.py | longgangfan/underworld2 | 116 | 12654363 | #!/usr/bin/env python3
import sys, subprocess
subp = subprocess.Popen(
'python3 `which scons` --config=force -f SConfigure ' + ' '.join(sys.argv[1:]), shell=True
)
subp.wait()
# return the return code
sys.exit(subp.returncode)
|
django_su/forms.py | marknotfound/django-su | 123 | 12654372 | <reponame>marknotfound/django-su<gh_stars>100-1000
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
User = get_user_model()
class UserSuForm(forms.Form):
username_field = User.USERNAME_FIELD
user = forms.ModelChoiceField(
label=_('Users'), queryset=User._default_manager.order_by(
username_field), required=True) # pylint: disable=W0212
use_ajax_select = False
def __init__(self, *args, **kwargs):
super(UserSuForm, self).__init__(*args, **kwargs)
if 'ajax_select' in settings.INSTALLED_APPS and getattr(
settings, 'AJAX_LOOKUP_CHANNELS', None):
from ajax_select.fields import AutoCompleteSelectField
lookup = settings.AJAX_LOOKUP_CHANNELS.get('django_su', None)
if lookup is not None:
old_field = self.fields['user']
self.fields['user'] = AutoCompleteSelectField(
'django_su',
required=old_field.required,
label=old_field.label,
)
self.use_ajax_select = True
def get_user(self):
return self.cleaned_data.get('user', None)
def __str__(self):
if 'formadmin' in settings.INSTALLED_APPS:
try:
from formadmin.forms import as_django_admin
return as_django_admin(self)
except ImportError:
pass
return super(UserSuForm, self).__str__()
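# Usage sketch (illustrative; the surrounding view wiring is an assumption):
#   form = UserSuForm(data={'user': some_user.pk})
#   if form.is_valid():
#       target_user = form.get_user()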
|
nel/util.py | psyML/nel | 196 | 12654374 | import six
import Queue
import socket
import multiprocessing
from time import time
from itertools import chain
from collections import defaultdict
from bisect import bisect_left, bisect_right
from contextlib import contextmanager
from nel import logging
log = logging.getLogger()
def get_from_module(cid, mod_params, mod_name, instantiate=False, kwargs=None):
if isinstance(cid, six.string_types):
res = mod_params.get(cid)
if not res:
raise Exception('Invalid ' + str(mod_name) + ': ' + str(cid))
if instantiate and not kwargs:
return res()
elif instantiate and kwargs:
return res(**kwargs)
else:
return res
return cid
def byte_to_char_map(byte_str, encoding='utf-8'):
mapping = {}
char_str = byte_str.decode(encoding)
byte_offset, char_offset = 0, 0
for char_offset, c in enumerate(char_str):
mapping[byte_offset] = char_offset
byte_offset += len(c.encode(encoding))
mapping[byte_offset] = char_offset
return mapping
def group(iteration, key_getter, value_getter):
d = defaultdict(list)
for item in iteration:
d[key_getter(item)].append(value_getter(item))
return d
def invert_grouping(g):
d = defaultdict(list)
for k, items in g.iteritems():
for i in items:
d[i].append(k)
return d
def spanset_insert(indicies, begin, end):
""" Determines if a span from an index set is occupied in O(log(n)) """
b_idx = bisect_right(indicies, begin)
e_idx = bisect_left(indicies, end)
can_insert = b_idx == e_idx and \
(b_idx == 0 or indicies[b_idx - 1] != begin) and \
(e_idx == len(indicies) or indicies[e_idx] != end) and \
b_idx % 2 == 0
if can_insert:
indicies.insert(b_idx, begin)
indicies.insert(b_idx + 1, end)
return can_insert
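# Worked example for spanset_insert (values illustrative):
#   spans = []
#   spanset_insert(spans, 0, 5)   # True;  spans -> [0, 5]
#   spanset_insert(spans, 6, 9)   # True;  spans -> [0, 5, 6, 9]
#   spanset_insert(spans, 3, 7)   # False; overlaps both existing spans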
def spawn_worker(f):
def fun(wid, q_in, q_out, recycle_interval):
job_count = 0
while True:
i,x = q_in.get()
if i is None:
break
try:
recycle_id = wid if job_count + 1 == recycle_interval else None
q_out.put(((i, f(x)), recycle_id))
job_count += 1
if recycle_id != None:
return
except Exception as e:
log.error("Worker function exception: %s" % e)
raise
return fun
def iter_to_input_queue(iteration, q_in, p_control):
iteration_len = 0
for i, x in enumerate(iteration):
q_in.put((i, x))
iteration_len += 1
p_control.send(iteration_len)
p_control.close()
class parmapper(object):
def __init__(self, job, nprocs = None, recycle_interval = 5):
if nprocs == None:
nprocs = multiprocessing.cpu_count() - 1
self.job = job
self.q_in = multiprocessing.Queue(1)
self.q_out = multiprocessing.Queue(nprocs)
self.recycle_interval = recycle_interval
self.procs = [self.get_process(i) for i in range(nprocs)]
def get_process(self, idx):
return multiprocessing.Process(
target=spawn_worker(self.job),
args=(idx, self.q_in, self.q_out, self.recycle_interval))
def run_process(self, idx):
self.procs[idx].daemon = True
self.procs[idx].start()
def __enter__(self):
for i in xrange(len(self.procs)):
self.run_process(i)
return self
def recycle_worker(self, wid):
worker = self.procs[wid]
#log.debug('Recycling worker id=%i, pid=%i...' % (wid, worker.pid))
worker.join()
self.procs[wid] = self.get_process(wid)
self.run_process(wid)
def consume(self, producer):
worker_pipe, control_pipe = multiprocessing.Pipe(True)
async_input_iterator = multiprocessing.Process(target=iter_to_input_queue,args=(producer, self.q_in, worker_pipe))
async_input_iterator.daemon = True
async_input_iterator.start()
expected_output_count = None
output_count = 0
while expected_output_count == None or expected_output_count > output_count:
if expected_output_count == None and control_pipe.poll():
expected_output_count = control_pipe.recv()
#log.debug('Producer exhausted with %i items total, %i remaining...' % (expected_output_count, expected_output_count - output_count))
try:
# couldn't get this working without a busy wait
out, recycle_wid = self.q_out.get_nowait()
while True:
if recycle_wid != None:
self.recycle_worker(recycle_wid)
yield out
output_count += 1
out, recycle_wid = self.q_out.get_nowait()
except Queue.Empty: pass
async_input_iterator.join()
def __exit__(self, t, value, traceback):
for _ in self.procs:
self.q_in.put((None,None))
for p in self.procs:
p.join() # todo: kill after some timeout
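# Usage sketch for parmapper (illustrative): results are yielded as
# (index, value) pairs, possibly out of input order.
#   def square(x):
#       return x * x
#   with parmapper(square, nprocs=4) as pm:
#       results = dict(pm.consume(xrange(100)))  # results[i] == i * i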
@contextmanager
def tcp_socket(host,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host,port))
yield s
finally:
try:
s.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
except OSError:
pass
finally:
s.close()
class trie(object):
def __init__(self):
self.Children = defaultdict(trie)
self.Matches = set()
def insert_many(self, sequence, entities):
if len(entities) > 0:
self._insert(sequence, entities, 0, True)
def insert(self, sequence, e):
self._insert(sequence, e, 0, False)
def _insert(self, sequence, e, offset, multi):
if offset < len(sequence):
item = sequence[offset]
self.Children[item]._insert(sequence, e, offset + 1, multi)
else:
if multi:
for entity in e:
self.Matches.add((entity, offset))
else:
self.Matches.add((e, offset))
def iter_matches(self):
for e in self.Matches: yield e
def scan(self, seq):
for i in xrange(0, len(seq)):
for m in self.match(seq, i, True, True):
yield m
def match(self, seq, offset = 0, subsequences = False, inorder = True):
# if we are yielding subsequence matches, or the sequence
# is complete return all entities for the current node
current = [(e, (offset - length, offset)) for e, length in self.iter_matches()] if subsequences or offset == len(seq) else None
# iteration for the next items in the sequence
pending = None
if seq and offset < len(seq):
token = seq[offset]
if token in self.Children:
pending = self.Children[token].match(seq, offset + 1, subsequences, inorder)
if current and pending:
return chain(current, pending) if inorder else chain(pending, current)
return current or pending or []
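# Usage sketch for the trie (tokens and entity label are illustrative):
#   t = trie()
#   t.insert(['new', 'york'], 'LOC')
#   list(t.scan(['in', 'new', 'york', 'city']))
#   # -> [('LOC', (1, 3))]  # matched entity with (start, end) token offsets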
|
tests/core/sinkhorn_test.py | MUCDK/ott | 232 | 12654386 | # coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the Policy."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax.test_util
from ott.core import sinkhorn
from ott.geometry import costs
from ott.geometry import geometry
from ott.geometry import pointcloud
class SinkhornTest(jax.test_util.JaxTestCase):
def setUp(self):
super().setUp()
self.rng = jax.random.PRNGKey(0)
self.dim = 4
self.n = 68
self.m = 123
self.rng, *rngs = jax.random.split(self.rng, 5)
self.x = jax.random.uniform(rngs[0], (self.n, self.dim))
self.y = jax.random.uniform(rngs[1], (self.m, self.dim))
a = jax.random.uniform(rngs[2], (self.n,))
b = jax.random.uniform(rngs[3], (self.m,))
# adding zero weights to test proper handling
a = a.at[0].set(0)
b = b.at[3].set(0)
self.a = a / jnp.sum(a)
self.b = b / jnp.sum(b)
@parameterized.named_parameters(
dict(
testcase_name='lse-Leh-mom',
lse_mode=True,
momentum=1.0,
chg_momentum_from=29,
inner_iterations=10,
norm_error=1),
dict(
testcase_name='lse-small-mom',
lse_mode=True,
momentum=1.01,
chg_momentum_from=0,
inner_iterations=10,
norm_error=1),
dict(
testcase_name='lse-high-mom',
lse_mode=True,
momentum=1.5,
chg_momentum_from=0,
inner_iterations=10,
norm_error=1),
dict(
testcase_name='scal-Leh-mom',
lse_mode=False,
momentum=1.01,
chg_momentum_from=30,
inner_iterations=10,
norm_error=1),
dict(
testcase_name='scal-no-mom',
lse_mode=False,
momentum=1.0,
chg_momentum_from=0,
inner_iterations=10,
norm_error=1,
),
dict(
testcase_name='scal-high-mom',
lse_mode=False,
momentum=1.5,
chg_momentum_from=0,
inner_iterations=10,
norm_error=1,
),
dict(
testcase_name='lse-Leh-1',
lse_mode=True,
momentum=1.0,
chg_momentum_from=60,
inner_iterations=1,
norm_error=2),
dict(
testcase_name='lse-Leh-13',
lse_mode=True,
momentum=1.0,
chg_momentum_from=40,
inner_iterations=13,
norm_error=3,
),
dict(
testcase_name='lse-Leh-24',
lse_mode=True,
momentum=1.0,
chg_momentum_from=12,
inner_iterations=24,
norm_error=4,
))
def test_euclidean_point_cloud(self, lse_mode, momentum, chg_momentum_from,
inner_iterations, norm_error):
"""Two point clouds, tested with various parameters."""
threshold = 1e-3
geom = pointcloud.PointCloud(self.x, self.y, epsilon=0.1)
errors = sinkhorn.sinkhorn(
geom,
a=self.a,
b=self.b,
threshold=threshold,
momentum=momentum,
chg_momentum_from=chg_momentum_from,
inner_iterations=inner_iterations,
norm_error=norm_error,
lse_mode=lse_mode).errors
err = errors[errors > -1][-1]
self.assertGreater(threshold, err)
def test_autoepsilon(self):
"""Check that with auto-epsilon, dual potentials scale."""
scale = 2.77
# First geom specifies explicitly relative_epsilon to be True. This is not
# needed in principle, but introduced here to test logic.
geom_1 = pointcloud.PointCloud(self.x, self.y, relative_epsilon=True)
# jit first with jit inside sinkhorn call.
f_1 = sinkhorn.sinkhorn(
geom_1, a=self.a, b=self.b, tau_a=.99, tau_b=.97, jit=True).f
# Second geom does not provide whether epsilon is relative.
geom_2 = pointcloud.PointCloud(scale * self.x, scale * self.y)
# jit now with jit outside sinkhorn call.
compute_f = jax.jit(
lambda g, a, b: sinkhorn.sinkhorn(g, a, b, tau_a=.99, tau_b=.97).f)
f_2 = compute_f(geom_2, self.a, self.b)
# Ensure epsilon and optimal f's are a scale^2 apart (^2 comes from ^2 cost)
self.assertAllClose(geom_1.epsilon * scale**2, geom_2.epsilon,
rtol=1e-3, atol=1e-3)
self.assertAllClose(geom_1._epsilon.at(2) * scale**2,
geom_2._epsilon.at(2),
rtol=1e-3, atol=1e-3)
self.assertAllClose(f_1 * scale**2, f_2, rtol=1e-3, atol=1e-3)
@parameterized.product(
lse_mode=[True, False],
init=[2, 5],
decay=[.8, .9],
tau_a=[1.0, .93],
tau_b=[1.0, .91])
def test_autoepsilon_with_decay(self, lse_mode, init, decay, tau_a, tau_b):
"""Check that variations in init/decay work, and result in same solution."""
geom = pointcloud.PointCloud(self.x, self.y, init=init, decay=decay)
out_1 = sinkhorn.sinkhorn(
geom, a=self.a, b=self.b, tau_a=tau_a, tau_b=tau_b, jit=True,
threshold=1e-5)
geom = pointcloud.PointCloud(self.x, self.y)
out_2 = sinkhorn.sinkhorn(
geom, a=self.a, b=self.b, tau_a=tau_a, tau_b=tau_b, jit=True,
threshold=1e-5)
# recenter if problem is balanced, since in that case solution is only
# valid up to additive constant.
unb = (tau_a < 1.0 or tau_b < 1.0)
self.assertAllClose(
out_1.f if unb else out_1.f - jnp.mean(out_1.f[jnp.isfinite(out_1.f)]),
out_2.f if unb else out_2.f - jnp.mean(out_2.f[jnp.isfinite(out_2.f)]),
rtol=1e-4, atol=1e-4)
def test_euclidean_point_cloud_min_iter(self):
"""Testing the min_iterations parameter."""
threshold = 1e-3
geom = pointcloud.PointCloud(self.x, self.y, epsilon=0.1)
errors = sinkhorn.sinkhorn(
geom, a=self.a, b=self.b, threshold=threshold, min_iterations=34,
implicit_differentiation=False).errors
err = errors[jnp.logical_and(errors > -1, jnp.isfinite(errors))][-1]
self.assertGreater(threshold, err)
self.assertEqual(jnp.inf, errors[0])
self.assertEqual(jnp.inf, errors[1])
self.assertEqual(jnp.inf, errors[2])
self.assertGreater(errors[3], 0)
def test_geom_vs_point_cloud(self):
"""Two point clouds vs. simple cost_matrix execution of sinkorn."""
geom_1 = pointcloud.PointCloud(self.x, self.y)
geom_2 = geometry.Geometry(geom_1.cost_matrix)
f_1 = sinkhorn.sinkhorn(geom_1, a=self.a, b=self.b).f
f_2 = sinkhorn.sinkhorn(geom_2, a=self.a, b=self.b).f
# recentering to remove ambiguity on equality up to additive constant.
f_1 -= jnp.mean(f_1[jnp.isfinite(f_1)])
f_2 -= jnp.mean(f_2[jnp.isfinite(f_2)])
self.assertAllClose(f_1, f_2)
@parameterized.parameters([True], [False])
def test_euclidean_point_cloud_parallel_weights(self, lse_mode):
"""Two point clouds, parallel execution for batched histograms."""
self.rng, *rngs = jax.random.split(self.rng, 2)
batch = 4
a = jax.random.uniform(rngs[0], (batch, self.n))
b = jax.random.uniform(rngs[0], (batch, self.m))
a = a / jnp.sum(a, axis=1)[:, jnp.newaxis]
b = b / jnp.sum(b, axis=1)[:, jnp.newaxis]
threshold = 1e-3
geom = pointcloud.PointCloud(
self.x, self.y, epsilon=0.1, online=True)
errors = sinkhorn.sinkhorn(
geom, a=self.a, b=self.b, threshold=threshold, lse_mode=lse_mode).errors
err = errors[errors > -1][-1]
self.assertGreater(jnp.min(threshold - err), 0)
@parameterized.parameters([True], [False])
def test_online_euclidean_point_cloud(self, lse_mode):
"""Testing the online way to handle geometry."""
threshold = 1e-3
geom = pointcloud.PointCloud(
self.x, self.y, epsilon=0.1, online=True)
errors = sinkhorn.sinkhorn(
geom, a=self.a, b=self.b, threshold=threshold, lse_mode=lse_mode).errors
err = errors[errors > -1][-1]
self.assertGreater(threshold, err)
@parameterized.parameters([True], [False])
def test_online_vs_batch_euclidean_point_cloud(self, lse_mode):
"""Comparing online vs batch geometry."""
threshold = 1e-3
eps = 0.1
online_geom = pointcloud.PointCloud(
self.x, self.y, epsilon=eps, online=True)
online_geom_euc = pointcloud.PointCloud(
self.x,
self.y,
cost_fn=costs.Euclidean(),
epsilon=eps,
online=True)
batch_geom = pointcloud.PointCloud(self.x, self.y, epsilon=eps)
batch_geom_euc = pointcloud.PointCloud(
self.x,
self.y,
cost_fn=costs.Euclidean(),
epsilon=eps)
out_online = sinkhorn.sinkhorn(
online_geom, a=self.a, b=self.b, threshold=threshold, lse_mode=lse_mode)
out_batch = sinkhorn.sinkhorn(
batch_geom, a=self.a, b=self.b, threshold=threshold, lse_mode=lse_mode)
out_online_euc = sinkhorn.sinkhorn(
online_geom_euc,
a=self.a,
b=self.b,
threshold=threshold,
lse_mode=lse_mode)
out_batch_euc = sinkhorn.sinkhorn(
batch_geom_euc,
a=self.a,
b=self.b,
threshold=threshold,
lse_mode=lse_mode)
# Checks regularized transport costs match.
self.assertAllClose(out_online.reg_ot_cost, out_batch.reg_ot_cost)
# check regularized transport matrices match
self.assertAllClose(
online_geom.transport_from_potentials(out_online.f, out_online.g),
batch_geom.transport_from_potentials(out_batch.f, out_batch.g))
self.assertAllClose(
online_geom_euc.transport_from_potentials(out_online_euc.f,
out_online_euc.g),
batch_geom_euc.transport_from_potentials(out_batch_euc.f,
out_batch_euc.g))
self.assertAllClose(
batch_geom.transport_from_potentials(out_batch.f, out_batch.g),
batch_geom_euc.transport_from_potentials(out_batch_euc.f,
out_batch_euc.g))
def test_apply_transport_geometry_from_potentials(self):
"""Applying transport matrix P on vector without instantiating P."""
n, m, d = 160, 230, 6
keys = jax.random.split(self.rng, 6)
x = jax.random.uniform(keys[0], (n, d))
y = jax.random.uniform(keys[1], (m, d))
a = jax.random.uniform(keys[2], (n,))
b = jax.random.uniform(keys[3], (m,))
a = a / jnp.sum(a)
b = b / jnp.sum(b)
transport_t_vec_a = [None, None, None, None]
transport_vec_b = [None, None, None, None]
batch_b = 8
vec_a = jax.random.normal(keys[4], (n,))
vec_b = jax.random.normal(keys[5], (batch_b, m))
# test with lse_mode and online = True / False
for j, lse_mode in enumerate([True, False]):
for i, online in enumerate([True, False]):
geom = pointcloud.PointCloud(x, y, online=online, epsilon=0.2)
sink = sinkhorn.sinkhorn(geom, a, b, lse_mode=lse_mode)
transport_t_vec_a[i + 2 * j] = geom.apply_transport_from_potentials(
sink.f, sink.g, vec_a, axis=0)
transport_vec_b[i + 2 * j] = geom.apply_transport_from_potentials(
sink.f, sink.g, vec_b, axis=1)
transport = geom.transport_from_potentials(sink.f, sink.g)
self.assertAllClose(
transport_t_vec_a[i + 2 * j],
jnp.dot(transport.T, vec_a).T,
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
transport_vec_b[i + 2 * j],
jnp.dot(transport, vec_b.T).T,
rtol=1e-3,
atol=1e-3)
for i in range(4):
self.assertAllClose(
transport_vec_b[i], transport_vec_b[0], rtol=1e-3, atol=1e-3)
self.assertAllClose(
transport_t_vec_a[i], transport_t_vec_a[0], rtol=1e-3, atol=1e-3)
def test_apply_transport_geometry_from_scalings(self):
"""Applying transport matrix P on vector without instantiating P."""
n, m, d = 160, 230, 6
keys = jax.random.split(self.rng, 6)
x = jax.random.uniform(keys[0], (n, d))
y = jax.random.uniform(keys[1], (m, d))
a = jax.random.uniform(keys[2], (n,))
b = jax.random.uniform(keys[3], (m,))
a = a / jnp.sum(a)
b = b / jnp.sum(b)
transport_t_vec_a = [None, None, None, None]
transport_vec_b = [None, None, None, None]
batch_b = 8
vec_a = jax.random.normal(keys[4], (n,))
vec_b = jax.random.normal(keys[5], (batch_b, m))
# test with lse_mode and online = True / False
for j, lse_mode in enumerate([True, False]):
for i, online in enumerate([True, False]):
geom = pointcloud.PointCloud(x, y, online=online, epsilon=0.2)
sink = sinkhorn.sinkhorn(geom, a, b, lse_mode=lse_mode)
u = geom.scaling_from_potential(sink.f)
v = geom.scaling_from_potential(sink.g)
transport_t_vec_a[i + 2 * j] = geom.apply_transport_from_scalings(
u, v, vec_a, axis=0)
transport_vec_b[i + 2 * j] = geom.apply_transport_from_scalings(
u, v, vec_b, axis=1)
transport = geom.transport_from_scalings(u, v)
self.assertAllClose(
transport_t_vec_a[i + 2 * j],
jnp.dot(transport.T, vec_a).T,
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
transport_vec_b[i + 2 * j],
jnp.dot(transport, vec_b.T).T,
rtol=1e-3,
atol=1e-3)
        self.assertFalse(jnp.any(jnp.isnan(transport_t_vec_a[i + 2 * j])))
for i in range(4):
self.assertAllClose(
transport_vec_b[i], transport_vec_b[0], rtol=1e-3, atol=1e-3)
self.assertAllClose(
transport_t_vec_a[i], transport_t_vec_a[0], rtol=1e-3, atol=1e-3)
@parameterized.parameters([True], [False])
def test_restart(self, lse_mode):
"""Two point clouds, tested with various parameters."""
threshold = 1e-4
geom = pointcloud.PointCloud(self.x, self.y, epsilon=0.01)
out = sinkhorn.sinkhorn(
geom,
a=self.a,
b=self.b,
threshold=threshold,
lse_mode=lse_mode,
inner_iterations=1)
errors = out.errors
err = errors[errors > -1][-1]
self.assertGreater(threshold, err)
# recover solution from previous and ensure faster convergence.
if lse_mode:
init_dual_a, init_dual_b = out.f, out.g
else:
init_dual_a, init_dual_b = (geom.scaling_from_potential(out.f),
geom.scaling_from_potential(out.g))
out_restarted = sinkhorn.sinkhorn(
geom,
a=self.a,
b=self.b,
threshold=threshold,
lse_mode=lse_mode,
init_dual_a=init_dual_a,
init_dual_b=init_dual_b,
inner_iterations=1)
errors_restarted = out_restarted.errors
err_restarted = errors_restarted[errors_restarted > -1][-1]
self.assertGreater(threshold, err_restarted)
num_iter_restarted = jnp.sum(errors_restarted > -1)
# check we can only improve on error
self.assertGreater(err, err_restarted)
# check first error in restart does at least as well as previous best
self.assertGreater(err, errors_restarted[0])
# check only one iteration suffices when restarting with same data.
self.assertEqual(num_iter_restarted, 1)
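# Minimal sketch of the API exercised by these tests (inputs illustrative):
#   geom = pointcloud.PointCloud(x, y, epsilon=0.1)
#   out = sinkhorn.sinkhorn(geom, a=a, b=b, threshold=1e-3)
#   out.f, out.g     # dual potentials
#   out.reg_ot_cost  # regularized OT cost
#   out.errors       # convergence errors, -1 where not computed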
if __name__ == '__main__':
absltest.main()
|
scvelo/core/tests/test_arithmetic.py | WeilerP/scvelo | 272 | 12654393 | <gh_stars>100-1000
from typing import List
from hypothesis import given
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
import numpy as np
from numpy import ndarray
from numpy.testing import assert_almost_equal, assert_array_equal
from scvelo.core import clipped_log, invert, prod_sum, sum
class TestClippedLog:
@given(
a=arrays(
float,
shape=st.integers(min_value=1, max_value=100),
elements=st.floats(
min_value=-1e3, max_value=1e3, allow_infinity=False, allow_nan=False
),
),
bounds=st.lists(
st.floats(
min_value=0, max_value=100, allow_infinity=False, allow_nan=False
),
min_size=2,
max_size=2,
unique=True,
),
eps=st.floats(
min_value=1e-6, max_value=1, allow_infinity=False, allow_nan=False
),
)
def test_flat_arrays(self, a: ndarray, bounds: List[float], eps: float):
lb = min(bounds)
ub = max(bounds) + 2 * eps
a_logged = clipped_log(a, lb=lb, ub=ub, eps=eps)
assert a_logged.shape == a.shape
if (a <= lb).any():
assert_almost_equal(np.abs(a_logged - np.log(lb + eps)).min(), 0)
else:
assert (a_logged >= np.log(lb + eps)).all()
if (a >= ub).any():
assert_almost_equal(np.abs(a_logged - np.log(ub - eps)).min(), 0)
else:
assert (a_logged <= np.log(ub - eps)).all()
@given(
a=arrays(
float,
shape=st.tuples(
st.integers(min_value=1, max_value=100),
st.integers(min_value=1, max_value=100),
),
elements=st.floats(
min_value=-1e3, max_value=1e3, allow_infinity=False, allow_nan=False
),
),
bounds=st.lists(
st.floats(
min_value=0, max_value=100, allow_infinity=False, allow_nan=False
),
min_size=2,
max_size=2,
unique=True,
),
eps=st.floats(
min_value=1e-6, max_value=1, allow_infinity=False, allow_nan=False
),
)
def test_2d_arrays(self, a: ndarray, bounds: List[float], eps: float):
lb = min(bounds)
ub = max(bounds) + 2 * eps
a_logged = clipped_log(a, lb=lb, ub=ub, eps=eps)
assert a_logged.shape == a.shape
if (a <= lb).any():
assert_almost_equal(np.abs(a_logged - np.log(lb + eps)).min(), 0)
else:
assert (a_logged >= np.log(lb + eps)).all()
if (a >= ub).any():
assert_almost_equal(np.abs(a_logged - np.log(ub - eps)).min(), 0)
else:
assert (a_logged <= np.log(ub - eps)).all()
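# Semantics asserted above, illustrated (a minimal sketch; the clip-then-log
# formulation is inferred from the bounds checks in these tests):
#   clipped_log(np.array([-1.0, 0.5, 10.0]), lb=0.0, ub=1.0, eps=1e-6)
#   # ~ np.log(np.clip([-1.0, 0.5, 10.0], 0.0 + 1e-6, 1.0 - 1e-6))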
class TestInvert:
@given(
a=arrays(
float,
shape=st.integers(min_value=1, max_value=100),
elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
)
)
def test_flat_arrays(self, a: ndarray):
a_inv = invert(a)
if a[a != 0].size == 0:
assert a_inv[a != 0].size == 0
else:
assert_array_equal(a_inv[a != 0], 1 / a[a != 0])
if 0 in a:
assert np.isnan(a_inv[a == 0]).all()
else:
assert set(a_inv[a == 0]) == set()
@given(
a=arrays(
float,
shape=st.tuples(
st.integers(min_value=1, max_value=100),
st.integers(min_value=1, max_value=100),
),
elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
)
)
def test_2d_arrays(self, a: ndarray):
a_inv = invert(a)
if a[a != 0].size == 0:
assert a_inv[a != 0].size == 0
else:
assert_array_equal(a_inv[a != 0], 1 / a[a != 0])
if 0 in a:
assert np.isnan(a_inv[a == 0]).all()
else:
assert set(a_inv[a == 0]) == set()
# TODO: Extend test to generate sparse inputs as well
# TODO: Make test to generate two different arrays a1, a2
# TODO: Check why tests fail with assert_almost_equal
class TestProdSum:
@given(
a=arrays(
float,
shape=st.integers(min_value=1, max_value=100),
elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
),
axis=st.integers(min_value=0, max_value=1),
)
def test_flat_array(self, a: ndarray, axis: int):
assert np.allclose((a * a).sum(axis=0), prod_sum(a, a, axis=axis))
@given(
a=arrays(
float,
shape=st.tuples(
st.integers(min_value=1, max_value=100),
st.integers(min_value=1, max_value=100),
),
elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
),
axis=st.integers(min_value=0, max_value=1),
)
def test_2d_array(self, a: ndarray, axis: int):
assert np.allclose((a * a).sum(axis=axis), prod_sum(a, a, axis=axis))
# TODO: Extend test to generate sparse inputs as well
class TestSum:
@given(
a=arrays(
float,
shape=st.integers(min_value=1, max_value=100),
elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
),
)
def test_flat_arrays(self, a: ndarray):
a_summed = sum(a=a, axis=0)
assert_array_equal(a_summed, a.sum(axis=0))
@given(
a=arrays(
float,
shape=st.tuples(
st.integers(min_value=1, max_value=100),
st.integers(min_value=1, max_value=100),
),
elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
),
axis=st.integers(min_value=0, max_value=1),
)
def test_2d_arrays(self, a: ndarray, axis: int):
a_summed = sum(a=a, axis=axis)
if a.ndim == 1:
axis = 0
assert_array_equal(a_summed, a.sum(axis=axis))
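# A minimal sketch toward the sparse-input TODOs above; it assumes
# scvelo.core.sum accepts scipy sparse matrices and returns a flat array,
# and reuses the dense 2d-array strategy wrapped in csr_matrix.
class TestSumSparse:
    @given(
        a=arrays(
            float,
            shape=st.tuples(
                st.integers(min_value=1, max_value=10),
                st.integers(min_value=1, max_value=10),
            ),
            elements=st.floats(max_value=1e3, allow_infinity=False, allow_nan=False),
        ),
        axis=st.integers(min_value=0, max_value=1),
    )
    def test_2d_arrays(self, a: ndarray, axis: int):
        from scipy.sparse import csr_matrix

        a_summed = sum(a=csr_matrix(a), axis=axis)
        assert np.allclose(np.ravel(a_summed), a.sum(axis=axis))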
|
modules/imdb.py | nikolas/jenni | 133 | 12654418 | <filename>modules/imdb.py
# -*- coding: utf8 -*-
'''
imdb.py - jenni Movie Information Module
Copyright 2014-2015, yano, yanovich.net
Copyright 2012, <NAME>, <<EMAIL>>
Licensed under the Eiffel Forum License 2.
This module relies on omdbapi.com
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
'''
from modules import proxy
import json
import re
import urllib2
API_BASE_URL = 'http://www.omdbapi.com/'
def prep_title(txt):
txt = txt.replace(' ', '+')
    txt = txt.encode('utf-8')
txt = urllib2.quote(txt)
return txt
def movie(jenni, input):
'''.imdb movie/show title -- displays information about a production'''
if not input.group(2):
return jenni.say('Please enter a movie or TV show title. '
'Year is optional.')
word = input.group(2).rstrip()
matchObj = re.match(r'([\w\s]*)\s?,\s?(\d{4})', word, re.M | re.I)
if matchObj:
title = matchObj.group(1)
year = matchObj.group(2)
title = prep_title(title)
uri = API_BASE_URL + '?t=%s&y=%s&plot=short&r=json' % (title, year)
else:
title = word
title = prep_title(title)
uri = API_BASE_URL + '?t=%s&plot=short&r=json' % (title)
try:
page = proxy.get(uri)
except:
return jenni.say('[IMDB] Connection to API did not succeed.')
try:
data = json.loads(page)
except:
return jenni.say("[IMDB] Couldn't make sense of information from API")
message = '[IMDB] '
if data['Response'] == 'False':
if 'Error' in data:
message += data['Error']
else:
            message += 'Got an error from omdbapi'
else:
pre_plot_output = u'Title: {0} | Released: {1} | Rated: {2} '
pre_plot_output += '| Rating: {3} | Metascore: {4} | Genre: {5} '
pre_plot_output += '| Runtime: {6} | Plot: '
genre = data['Genre']
runtime = data['Runtime']
pre_plot = pre_plot_output.format(data['Title'], data['Released'],
data['Rated'], data['imdbRating'],
data['Metascore'], genre,
runtime)
after_plot_output = ' | IMDB Link: http://imdb.com/title/{0}'
after_plot = after_plot_output.format(data['imdbID'])
truncation = '[...]'
## 510 - (16 + 8 + 63)
## max_chars (minus \r\n) - (max_nick_length + max_ident_length
        ## + max_vhost_length_on_freenode)
max_len_of_plot = 423 - (len(pre_plot) + len(after_plot) + len(truncation))
new_plot = data['Plot']
if len(data['Plot']) > max_len_of_plot:
new_plot = data['Plot'][:max_len_of_plot] + truncation
message = pre_plot + new_plot + after_plot
jenni.say(message)
movie.commands = ['imdb', 'movie', 'movies', 'show', 'tv', 'television']
movie.example = '.imdb Movie Title, 2015'
if __name__ == '__main__':
print __doc__.strip()
|
django_apscheduler/migrations/0007_auto_20200717_1404.py | calledbert/django-apscheduler | 331 | 12654442 | <gh_stars>100-1000
# Generated by Django 2.2.14 on 2020-07-17 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_apscheduler", "0006_remove_djangojob_name"),
]
operations = [
migrations.AlterField(
model_name="djangojobexecution",
name="id",
field=models.BigAutoField(
help_text="Unique ID for this job execution.",
primary_key=True,
serialize=False,
),
),
]
|
deep_autoencoder.py | Abdel-Moussaoui/Auto-encoders | 122 | 12654444 | from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import pickle
# Deep Autoencoder
features_path = 'deep_autoe_features.pickle'
labels_path = 'deep_autoe_labels.pickle'
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression factor 24.5, assuming the input is 784 floats
# this is our input placeholder; 784 = 28 x 28
input_img = Input(shape=(784, ))
my_epochs = 100
# "encoded" is the encoded representation of the inputs
encoded = Dense(encoding_dim * 4, activation='relu')(input_img)
encoded = Dense(encoding_dim * 2, activation='relu')(encoded)
encoded = Dense(encoding_dim, activation='relu')(encoded)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(encoding_dim * 2, activation='relu')(encoded)
decoded = Dense(encoding_dim * 4, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# Separate Encoder model
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# Separate Decoder model
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim, ))
# retrieve the layers of the autoencoder model
decoder_layer1 = autoencoder.layers[-3]
decoder_layer2 = autoencoder.layers[-2]
decoder_layer3 = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer3(decoder_layer2(decoder_layer1(encoded_input))))
# Train to reconstruct MNIST digits
# configure model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# prepare input data
(x_train, _), (x_test, y_test) = mnist.load_data()
# normalize all values between 0 and 1 and flatten the 28x28 images into vectors of size 784
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
# Train autoencoder for 50 epochs
autoencoder.fit(x_train, x_train, epochs=my_epochs, batch_size=256, shuffle=True, validation_data=(x_test, x_test),
verbose=2)
# after 100 epochs the autoencoder seems to reach a stable train/test loss value
# Visualize the reconstructed encoded representations
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# save latent space features 32-d vector
pickle.dump(encoded_imgs, open(features_path, 'wb'))
pickle.dump(y_test, open(labels_path, 'wb'))
n = 10 # how many digits we will display
plt.figure(figsize=(10, 2), dpi=100)
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.set_axis_off()
# display reconstruction
ax = plt.subplot(2, n, i + n + 1)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.set_axis_off()
plt.show()
K.clear_session()
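# Optional follow-up sketch (not part of the original script): the pickled
# 32-d latent features above can feed a simple downstream classifier; the
# logistic-regression choice here is illustrative.
# from sklearn.linear_model import LogisticRegression
# from sklearn.model_selection import train_test_split
#
# feats = pickle.load(open(features_path, 'rb'))
# labels = pickle.load(open(labels_path, 'rb'))
# f_tr, f_te, l_tr, l_te = train_test_split(feats, labels, test_size=0.2, random_state=0)
# clf = LogisticRegression(max_iter=1000).fit(f_tr, l_tr)
# print('latent-space test accuracy: %.3f' % clf.score(f_te, l_te))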
|
pytext/utils/tests/label_test.py | dmitryvinn/pytext | 6,199 | 12654451 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from pytext.utils import label
class LabelUtilTest(unittest.TestCase):
def test_get_label_weights(self):
vocab = {"foo": 0, "bar": 1}
weights = {"foo": 3.2, "foobar": 2.1}
weights_tensor = label.get_label_weights(vocab, weights)
np.testing.assert_array_almost_equal(
np.array([3.2, 1]), weights_tensor.detach().numpy()
)
def test_get_auto_label_weights(self):
vocab_dict = {"foo": 0, "bar": 1}
label_counts = {"foo": 4, "bar": 1}
weights_tensor = label.get_auto_label_weights(vocab_dict, label_counts)
np.testing.assert_array_almost_equal(
np.array([0.25, 4]), weights_tensor[0].detach().numpy()
)
def test_get_normalized_sqrt_label_weights(self):
vocab_dict = {"foo": 0, "bar": 1}
label_counts = {"foo": 4, "bar": 1}
weights_tensor = label.get_normalized_sqrt_label_weights(
vocab_dict, label_counts
)
np.testing.assert_array_almost_equal(
np.array([0.5, 2]), weights_tensor[0].detach().numpy()
)
def test_get_normalized_cap_label_weights(self):
vocab_dict = {"foo": 0, "bar": 1}
label_counts = {"foo": 4, "bar": 1}
weights_tensor = label.get_normalized_cap_label_weights(
vocab_dict, label_counts
)
np.testing.assert_array_almost_equal(
np.array([0.625, 1]), weights_tensor[0].detach().numpy()
)
|
test/samples-python/simple.py | voltek62/vscode-ipe | 227 | 12654456 | print('hello')
print('how are you')
print(2+4*2)
|
Lib/test/test_compiler/testcorpus/53_list_comp_method.py | diogommartins/cinder | 1,886 | 12654457 | <gh_stars>1000+
[x for x in s].copy()
|
tooling/ddsketch-reference-generator/main.py | TimonPost/metrics | 506 | 12654521 | #!/usr/bin/env python3
import argparse
from ddsketch.ddsketch import LogCollapsingLowestDenseDDSketch
import numpy as np
import os
def main():
    parser = argparse.ArgumentParser(description='Generate DDSketch reference quantiles from an input list of values.')
parser.add_argument('input', type=argparse.FileType('r'))
parser.add_argument('output', type=argparse.FileType('w'))
parser.add_argument('alpha', type=float, nargs='?', default=0.0001)
parser.add_argument('max_bins', type=int, nargs='?', default=32768)
args = parser.parse_args()
input_floats = []
for line in args.input.readlines():
input_floats += [float(i) for i in line.split(",") if i.strip()]
sketch = LogCollapsingLowestDenseDDSketch(relative_accuracy=args.alpha, bin_limit=args.max_bins)
for v in input_floats:
sketch.add(v)
output_quantiles = [(x, sketch.get_quantile_value(x)) for x in np.linspace(0, 1, 1000)]
for quantile, value in output_quantiles:
args.output.write(f"{quantile:.3},{value:.9}\n")
args.output.flush()
    os.fsync(args.output.fileno())  # os.fsync needs a file descriptor, not a file object
if __name__ == "__main__":
main()
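# Example invocation (file names illustrative): reads comma-separated floats
# and writes 1000 "quantile,value" rows:
#   python main.py input_values.txt reference_quantiles.csv 0.0001 32768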
|
jionlp/util/funcs.py | ji3g4m6zo6/JioNLP | 1,063 | 12654554 | # -*- coding=utf-8 -*-
# library: jionlp
# author: dongrixinyu
# license: Apache License 2.0
# Email: <EMAIL>
# github: https://github.com/dongrixinyu/JioNLP
# description: Preprocessing tool for Chinese NLP
def bracket(regular_expression):
return ''.join([r'(', regular_expression, r')'])
def bracket_absence(regular_expression):
return ''.join([r'(', regular_expression, r')?'])
def absence(regular_expression):
return ''.join([regular_expression, r'?'])
def start_end(regular_expression):
return ''.join([r'^', regular_expression, r'$'])
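if __name__ == '__main__':
    # minimal self-check of the helpers; the sample patterns are illustrative
    print(bracket(r'\d+'))          # (\d+)
    print(bracket_absence(r'\d+'))  # (\d+)?
    print(absence(r'[-+]'))         # [-+]?
    print(start_end(r'\d{4}'))      # ^\d{4}$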
|
awx/main/migrations/_scan_jobs.py | bhyunki/awx | 11,396 | 12654567 | <reponame>bhyunki/awx
import logging
logger = logging.getLogger('awx.main.migrations')
def remove_scan_type_nodes(apps, schema_editor):
WorkflowJobTemplateNode = apps.get_model('main', 'WorkflowJobTemplateNode')
WorkflowJobNode = apps.get_model('main', 'WorkflowJobNode')
for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
for node in cls.objects.iterator():
prompts = node.char_prompts
if prompts.get('job_type', None) == 'scan':
log_text = '{} set job_type to scan, which was deprecated in 3.2, removing.'.format(cls)
if cls == WorkflowJobNode:
logger.info(log_text)
else:
logger.debug(log_text)
prompts.pop('job_type')
node.char_prompts = prompts
node.save()
def remove_legacy_fact_cleanup(apps, schema_editor):
SystemJobTemplate = apps.get_model('main', 'SystemJobTemplate')
for job in SystemJobTemplate.objects.filter(job_type='cleanup_facts').all():
for sched in job.schedules.all():
sched.delete()
job.delete()
|
api/ui_config.py | fanbyprinciple/gpt3-sandbox | 2,514 | 12654618 | """Class to store customized UI parameters."""
class UIConfig():
"""Stores customized UI parameters."""
def __init__(self, description='Description',
button_text='Submit',
placeholder='Default placeholder',
show_example_form=False):
self.description = description
self.button_text = button_text
self.placeholder = placeholder
self.show_example_form = show_example_form
def get_description(self):
"""Returns the input of the example."""
return self.description
def get_button_text(self):
"""Returns the intended output of the example."""
return self.button_text
def get_placeholder(self):
"""Returns the intended output of the example."""
return self.placeholder
def get_show_example_form(self):
"""Returns whether editable example form is shown."""
return self.show_example_form
def json(self):
"""Used to send the parameter values to the API."""
return {"description": self.description,
"button_text": self.button_text,
"placeholder": self.placeholder,
"show_example_form": self.show_example_form}
|
plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_view_change.py | jandayanan/indy-plenum | 148 | 12654641 | <gh_stars>100-1000
import pytest
from plenum.common.constants import LAST_SENT_PRE_PREPARE
from plenum.test import waits
from plenum.test.helper import sdk_send_batches_of_random, assertExp
from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica
from plenum.test.view_change.helper import ensure_view_change
from stp_core.loop.eventually import eventually
nodeCount = 4
backup_inst_id = 1
num_batches_before = 3
num_batches_after = 1
def test_node_erases_last_sent_pp_key_on_view_change(
looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf):
# Get a node with a backup primary replica
replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
node = replica.node
# Send some 3PC-batches and wait until the replica orders the 3PC-batches
sdk_send_batches_of_random(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client,
num_reqs=3, num_batches=num_batches_before,
timeout=tconf.Max3PCBatchWait)
looper.run(
eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 3)),
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Ensure that there is a stored last sent PrePrepare key on the node
assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB
# Make the pool perform view change
ensure_view_change(looper, txnPoolNodeSet)
ensureElectionsDone(looper, txnPoolNodeSet)
# Verify that the node has erased the stored last sent PrePrepare key
for value in node.last_sent_pp_store_helper._load_last_sent_pp_key().values():
        # after the view change the stored key holds the new viewNo with pp_seq_no restarted at 1
assert value == [node.viewNo, 1]
# Send a 3PC-batch and ensure that the replica orders it
sdk_send_batches_of_random(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client,
num_reqs=1, num_batches=num_batches_after,
timeout=tconf.Max3PCBatchWait)
looper.run(
eventually(lambda: assertExp(replica.last_ordered_3pc == (1, num_batches_after + 1)),
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
|
utils/gapy/traces.py | 00-01/gap_sdk | 118 | 12654693 | #
# Copyright (C) 2019 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
verbose = False
def newLine():
info("")
def critical(msg):
""" Print critical message to stderr """
sys.stderr.write(msg)
sys.stderr.write('\n')
def info(msg):
infoWithoutNewLine(msg + '\n')
def infoWithoutNewLine(msg):
if verbose:
sys.stdout.write(msg)
|
pysbd/utils.py | gaganmanku96/pySBD | 429 | 12654695 | <reponame>gaganmanku96/pySBD
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import pysbd
class Rule(object):
def __init__(self, pattern, replacement):
self.pattern = pattern
self.replacement = replacement
def __repr__(self): # pragma: no cover
return '<{} pattern="{}" and replacement="{}">'.format(
self.__class__.__name__, self.pattern, self.replacement)
class Text(str):
"""Extending str functionality to apply regex rules
https://stackoverflow.com/questions/4698493/can-i-add-custom-methods-attributes-to-built-in-python-types
Parameters
----------
str : str
string content
Returns
-------
str
        the input unchanged if no rule pattern matches,
        else the input with every matched pattern replaced
"""
def apply(self, *rules):
for each_r in rules:
self = re.sub(each_r.pattern, each_r.replacement, self)
return self
class TextSpan(object):
def __init__(self, sent, start, end):
"""
Sentence text and its start & end character offsets within original text
Parameters
----------
sent : str
Sentence text
start : int
start character offset of a sentence in original text
end : int
end character offset of a sentence in original text
"""
self.sent = sent
self.start = start
self.end = end
def __repr__(self): # pragma: no cover
return "{0}(sent={1}, start={2}, end={3})".format(
self.__class__.__name__, repr(self.sent), self.start, self.end)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.sent == other.sent and self.start == other.start and self.end == other.end
class PySBDFactory(object):
"""pysbd as a spacy component through entrypoints"""
def __init__(self, nlp, language='en'):
self.nlp = nlp
self.seg = pysbd.Segmenter(language=language, clean=False,
char_span=True)
def __call__(self, doc):
sents_char_spans = self.seg.segment(doc.text_with_ws)
start_token_ids = [sent.start for sent in sents_char_spans]
for token in doc:
token.is_sent_start = (True if token.idx
in start_token_ids else False)
return doc
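if __name__ == '__main__':
    # minimal demo of Rule + Text (the sample pattern is illustrative)
    collapse_spaces = Rule(r'\s+', ' ')
    print(Text('pysbd  segments   sentences').apply(collapse_spaces))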
|
rasa_nlu_gao/classifiers/kashgari_intent_classifier.py | GaoQ1/rasa_nlu_gq | 298 | 12654696 | import logging
from typing import List, Text, Any, Optional, Dict
from rasa_nlu_gao.classifiers import INTENT_RANKING_LENGTH
from rasa.nlu.components import Component
from rasa.nlu.model import Metadata
from rasa.nlu.training_data import Message
import os
import shutil
import kashgari
from kashgari.embeddings import BERTEmbedding
import kashgari.tasks.classification as clf
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
logger = logging.getLogger(__name__)
class KashgariIntentClassifier(Component):
provides = ["intent", "intent_ranking"]
defaults = {
"bert_model_path": None,
"sequence_length": "auto",
"layer_nums": 4,
"trainable": False,
"classifier_model": "BiLSTM_Model",
"epochs": 10,
"batch_size": 32,
"validation_split": 0.2,
"patience": 5,
"factor": 0.5, # factor of reduce learning late everytime
"verbose": 1,
"use_cudnn_cell": False
}
def __init__(self,
component_config=None,
model = None):
super(KashgariIntentClassifier, self).__init__(component_config)
bert_model_path = self.component_config.get('bert_model_path')
sequence_length = self.component_config.get('sequence_length')
layer_nums = self.component_config.get('layer_nums')
trainable = self.component_config.get('trainable')
use_cudnn_cell = self.component_config.get('use_cudnn_cell')
kashgari.config.use_cudnn_cell = use_cudnn_cell
self.classifier_model = self.component_config.get('classifier_model')
self.bert_embedding = BERTEmbedding(bert_model_path,
task=kashgari.CLASSIFICATION,
layer_nums = layer_nums,
trainable=trainable,
sequence_length=sequence_length)
self.tokenizer = self.bert_embedding.tokenizer
self.model = model
def train(self, training_data, cfg, **kwargs):
classifier_model = eval("clf." + self.classifier_model)
epochs = self.component_config.get('epochs')
batch_size = self.component_config.get('batch_size')
validation_split = self.component_config.get('validation_split')
patience = self.component_config.get('patience')
factor = self.component_config.get('factor')
verbose = self.component_config.get('verbose')
X, Y = [], []
for msg in training_data.intent_examples:
X.append(self.tokenizer.tokenize(msg.text))
Y.append(msg.get('intent'))
train_x, validate_x, train_y, validate_y = train_test_split( X, Y, test_size=validation_split, random_state=100)
self.bert_embedding.processor.add_bos_eos = False
self.model = classifier_model(self.bert_embedding)
checkpoint = ModelCheckpoint(
'intent_weights.h5',
monitor='val_loss',
save_best_only=True,
save_weights_only=False,
verbose=verbose)
early_stopping = EarlyStopping(
monitor='val_loss',
patience=patience)
reduce_lr = ReduceLROnPlateau(
monitor='val_loss',
factor=factor,
patience=patience,
verbose=verbose)
self.model.fit(
train_x,
train_y,
validate_x,
validate_y,
epochs=epochs,
batch_size=batch_size,
callbacks=[checkpoint, early_stopping, reduce_lr]
)
def process(self, message, **kwargs):
intent_ranks = self.get_intent_score(message)
intent = intent_ranks[0]
message.set("intent", intent, add_to_output=True)
message.set("intent_ranking", intent_ranks, add_to_output=True)
def get_intent_score(self, message):
intent_top_k = self.model.predict_top_k_class(
[self.tokenizer.tokenize(message.text)],
top_k = INTENT_RANKING_LENGTH
)[0]
intent_ranks = [{
'name': intent_top_k['label'],
'confidence': float(intent_top_k['confidence'])
}]
for item in intent_top_k['candidates']:
intent_ranks.append({'name': item['label'], 'confidence': float(item['confidence'])})
return intent_ranks
def persist(self,
file_name: Text,
model_dir: Text) -> Optional[Dict[Text, Any]]:
model_path = os.path.join(model_dir, file_name)
self.model.save(model_path)
remove_file = os.path.join(model_path, 'model_weights.h5')
os.remove(remove_file)
shutil.move('intent_weights.h5', model_path)
os.rename(os.path.join(model_path, 'intent_weights.h5'), os.path.join(model_path, 'model_weights.h5'))
return {"file": file_name}
@classmethod
def load(cls,
meta: Dict[Text, Any],
model_dir: Optional[Text]=None,
model_metadata: Optional['Metadata']=None,
cached_component: Optional[Component]=None,
**kwargs: Any
) -> 'KashgariIntentClassifier':
if model_dir and meta.get("file"):
file_name = meta.get("file")
classifier_model = os.path.join(model_dir, file_name)
loaded_model = kashgari.utils.load_model(classifier_model)
return cls(component_config=meta,
model=loaded_model)
else:
logger.warning("Failed to load classifier model. Maybe path {} "
"doesn't exist"
"".format(os.path.abspath(model_dir)))
return cls(component_config=meta)
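# Hedged config sketch: how this component might be referenced from a
# rasa_nlu_gao pipeline; the component path and BERT folder name below are
# assumptions, adjust them to your installation:
#
# pipeline:
# - name: "rasa_nlu_gao.classifiers.kashgari_intent_classifier.KashgariIntentClassifier"
#   bert_model_path: "chinese_L-12_H-768_A-12"
#   classifier_model: "BiLSTM_Model"
#   epochs: 10
#   batch_size: 32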
|
pool_automation/roles/perf_scripts/molecule/resources/tests/test_configured.py | Rob-S/indy-node | 627 | 12654701 | import pytest
testinfra_hosts = ['clients']
def test_pool_txns_genesis_file_exists(host, pool_txns_path):
txns_file = host.file(pool_txns_path)
assert txns_file.exists
def test_perf_processes_can_connect(host, venv_path, pool_txns_path):
assert host.run(
"{}/bin/perf_processes.py --test_conn -g {}"
.format(venv_path, pool_txns_path)).rc == 0
|
non_semantic_speech_benchmark/export_model/model_conversion_beam_main_test.py | DionysisChristopoulos/google-research | 23,901 | 12654711 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model_conversion_beam_main."""
import os
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
import mock
from non_semantic_speech_benchmark.export_model import model_conversion_beam_main
TESTDIR = 'non_semantic_speech_benchmark/export_model/testdata'
class ModelConversionBeamMainTest(absltest.TestCase):
@mock.patch.object(model_conversion_beam_main.utils,
'convert_and_write_model')
@flagsaver.flagsaver
def test_full_flow(self, _):
flags.FLAGS.xids = ['12321']
flags.FLAGS.base_experiment_dir = os.path.join(
absltest.get_default_test_srcdir(), TESTDIR)
flags.FLAGS.output_dir = os.path.join(
absltest.get_default_test_tmpdir(), 'dummy_out')
# Frontend args.
flags.FLAGS.frame_hop = 5
flags.FLAGS.frame_width = 5
flags.FLAGS.num_mel_bins = 80
flags.FLAGS.n_required = 8000
model_conversion_beam_main.main(None)
if __name__ == '__main__':
absltest.main()
|
osp/workers/server.py | davidmcclure/open-syllabus-project | 220 | 12654772 | <filename>osp/workers/server.py
import os
from osp.common import config
from osp.common.utils import partitions
from flask import Flask, request
from rq_dashboard import RQDashboard
from pydoc import locate
# RQ dashboard:
app = Flask(__name__)
RQDashboard(app)
@app.route('/ping')
def ping():
return ('pong', 200)
@app.route('/queue', methods=['POST'])
def queue():
"""
Queue a work order.
"""
config.rq.enqueue(
queue_page,
request.form['model_import'],
request.form['job_import'],
int(request.form['worker_count']),
int(request.form['offset']),
timeout=3600,
)
return ('', 200)
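# Hedged usage sketch for the endpoint above; the import paths in the form
# fields are illustrative placeholders, not guaranteed OSP names:
#
# import requests
# requests.post('http://localhost:5000/queue', data={
#     'model_import': 'osp.corpus.models.Document',
#     'job_import': 'osp.corpus.jobs.ext_text',
#     'worker_count': 4,
#     'offset': 0,
# })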
def queue_page(model_import, job_import, worker_count, offset):
"""
Spool a page of model instances for a job.
Args:
model_import (str)
job_import (str)
worker_count (int)
offset (int)
"""
# Import callables.
model = locate(model_import)
job = locate(job_import)
for row in model.page_cursor(worker_count, offset):
config.rq.enqueue(job, row.id)
if __name__ == '__main__':
app.run(port=os.getenv('PORT', 5000))
|
setup.py | ronny-rentner/UltraDict | 131 | 12654797 | from pathlib import Path
from setuptools import setup, Extension
import Cython.Build
# read the contents of your README file
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
version = '0.0.4'
ext = Extension(name="UltraDict", sources=["UltraDict.py"])
setup(
name='UltraDict',
version=version,
description='Sychronized, streaming dictionary that uses shared memory as a backend',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ronny-rentner/UltraDict',
cmdclass={'build_ext': Cython.Build.build_ext},
package_dir={'UltraDict': '.'},
packages=['UltraDict'],
zip_safe=False,
ext_modules=Cython.Build.cythonize(ext, compiler_directives={'language_level' : "3"}),
setup_requires=['cython>=0.24.1'],
python_requires=">=3.9",
)
|
book/lists/unordered_list_test.py | Web-Dev-Collaborative/algos | 153 | 12654803 | import unittest
from unordered_list import UnorderedList
class CorrectnessTest(unittest.TestCase):
def test_adding(self):
l = UnorderedList()
self.assertEqual(l.size(), 0)
self.assertTrue(l.is_empty())
l.add(1)
self.assertEqual(l.size(), 1)
self.assertEqual(l.head.value, 1)
self.assertFalse(l.is_empty())
l.add(2)
self.assertEqual(l.size(), 2)
self.assertEqual(l.head.value, 2)
def test_searching(self):
l = UnorderedList()
for i in range(4):
l.add(i)
for i in range(4):
self.assertTrue(l.search(i))
for item in (5, None, True, "blah"):
self.assertFalse(l.search(item))
def test_remove(self):
l = UnorderedList()
for i in range(3):
l.add(i)
# remove from middle
l.remove(1)
self.assertFalse(l.search(1))
self.assertEqual(l.size(), 2)
# remove from end
l.remove(2)
self.assertFalse(l.search(2))
self.assertEqual(l.size(), 1)
# remove from start
l.remove(0)
self.assertFalse(l.search(0))
self.assertEqual(l.size(), 0)
|
interpolation/smolyak/tests/test_derivatives.py | vishalbelsare/interpolation.py | 110 | 12654816 | def test_derivatives():
from ..interp import SmolyakInterp
from ..grid import SmolyakGrid
d = 5
N = 100
mu = 2
f = lambda x: (x).sum(axis=1)
import numpy.random
ub = numpy.random.random(d) + 6
lb = numpy.random.random(d) - 5
sg = SmolyakGrid(d, mu, lb=lb, ub=ub)
values = f(sg.grid)
si = SmolyakInterp(sg, values)
gg = numpy.random.random((N, d))
res, res_s, res_c, res_x = si.interpolate(
gg, deriv=True, deriv_th=True, deriv_X=True
)
T = sg.grid.shape[0]
assert res.shape == (N,)
assert res_s.shape == (N, d)
assert res_c.shape == (N, T)
assert res_x.shape == (N, T)
# res_s should be identically 1
assert abs(res_s - 1.0).max() < 1e-8
epsilon = 1e-6
# Test derivatives w.r.t. values
si2 = SmolyakInterp(sg, values)
def ff(y):
x = y.reshape(values.shape)
si2.update_theta(x)
return si2.interpolate(gg).ravel()
y0 = values.ravel()
r0 = ff(y0)
jac = numpy.zeros((len(r0), len(y0)))
for n in range(len(y0)):
yi = y0.copy()
yi[n] += epsilon
jac[:, n] = (ff(yi) - r0) / epsilon
jac = jac.reshape((N, T))
assert abs(jac - res_x).max() < 1e-7
    # note that neither the numerical nor the direct computation is very accurate
# Test derivatives w.r.t. coefficients
theta_0 = si.theta.copy()
def ff_c(y_c):
si2.theta = y_c.reshape(theta_0.shape)
return si2.interpolate(gg).ravel()
r0 = ff_c(theta_0)
jac = numpy.zeros((len(r0), len(theta_0)))
    for n in range(len(theta_0)):
ti = theta_0.copy()
ti[n] += epsilon
jac[:, n] = (ff_c(ti) - r0) / epsilon
jac = jac.reshape((N, T))
assert abs(jac - res_c).max() < 1e-7
|
tests/integration/test_ddp_summarization.py | Anita1017/nlp-recipes | 4,407 | 12654872 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pytest
import torch
@pytest.mark.gpu
@pytest.mark.integration
def test_ddp_extractive_summarization_cnndm_transformers(scripts, tmp):
ddp_env = os.environ.copy()
ddp_env["OMP_NUM_THREADS"] = str(torch.cuda.device_count())
ddp_env["KMP_AFFINITY"] = "verbose"
script = scripts["ddp_bertsumext"]
summary_filename = "bertsumext_prediction.txt"
import subprocess
process = subprocess.Popen(
[
"python",
script,
"--data_dir",
tmp,
"--cache_dir",
tmp,
"--output_dir",
tmp,
"--quick_run",
"true",
"--summary_filename",
summary_filename,
],
env=ddp_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
print(stdout)
if process.returncode:
print(stdout)
print(stderr)
assert False
assert os.path.exists(os.path.join(tmp, summary_filename))
@pytest.mark.skip(
reason="""it takes too long; if the previous test works,
and the notebook runs, this should also work."""
)
@pytest.mark.gpu
@pytest.mark.integration
def test_ddp_abstractive_summarization_cnndm_transformers(scripts, tmp):
script = scripts["ddp_bertsumabs"]
summary_filename = "bertsumabs_prediction.txt"
import subprocess
process = subprocess.Popen(
[
"python",
script,
"--data_dir",
tmp,
"--cache_dir",
tmp,
"--output_dir",
tmp,
"--quick_run",
"true",
"--batch_size",
"1",
"--summary_filename",
summary_filename,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
print(stdout)
if process.returncode:
print(stdout)
print(stderr)
assert False
assert os.path.exists(os.path.join(tmp, summary_filename))
|
recipes/Python/572158_Pythhelper_open_SciTe_sessifiles_Windows/recipe-572158.py | tdiprima/code | 2,023 | 12654879 | # script to load SciTe .session files by double-clicking them in Windows Explorer
# the .session file type must be registered and its 'open' command associated with:
# "/path/to/python.exe" "/path/to/scite.py" "%1"
# NOTE: /path/to/scite.py MUST match /path/to/scite.exe (same directory and base name)!
# Example :
# "c:\python\python.exe" "c:\program files\wscite\wscite.py" "%1"
import sys, os, subprocess
# argv[0] is the full path to where this python script was launched from
# it must be in the same directory as the SciTe executable !
# argv[1] is the full path to the scite session file we want to load
script, sessionpath = sys.argv
# this gives us the path to the scite executable
scite = script[:-2] + 'exe'
# this gives us the basename of the session file and the directory it is in
sessiondir, sessionname = os.path.split(sessionpath)
# here we switch to the session file dir and launch scite with just the file name as the loadsession parm
subprocess.Popen([scite, "-loadsession:%s" % sessionname], cwd = sessiondir)
# script ends without waiting for command completion
|
fastmri_recon/tests/data/datasets/fastmri_pyfunc_non_cartesian_test.py | samiulshuvo/fastmri-reproducible-benchmark | 105 | 12654890 | import numpy as np
import pytest
import tensorflow as tf
from fastmri_recon.data.datasets.fastmri_pyfunc_non_cartesian import train_nc_kspace_dataset_from_indexable
image_shape = [2, 640, 322, 1]
af = 4
us = af / (2 / np.pi)
image_size = [640, 474]
kspace_shape = [image_shape[0], 1, 640 * (474//af), 1]
file_contrast = 'CORPD_FBK'
@pytest.mark.parametrize('ds_kwargs, expected_kspace_shape, orig_shape, use_af', [
({}, kspace_shape, image_shape[-2], True),
({'inner_slices': 1}, [1] + kspace_shape[1:], image_shape[-2], True),
({'inner_slices': 1, 'rand': True}, [1] + kspace_shape[1:], image_shape[-2], True),
({'contrast': file_contrast}, kspace_shape, image_shape[-2], True),
({'n_samples': 1}, kspace_shape, image_shape[-2], True),
({}, kspace_shape, image_shape[-2], False),
])
def test_train_nc_kspace_dataset_from_indexable(
create_full_fastmri_test_tmp_dataset,
ds_kwargs,
expected_kspace_shape,
orig_shape,
use_af,
):
path = create_full_fastmri_test_tmp_dataset['fastmri_tmp_singlecoil_train']
ds = train_nc_kspace_dataset_from_indexable(
path,
image_size,
af=af if use_af else None,
us=None if use_af else us,
**ds_kwargs,
)
(kspace, traj, (shape,)), image = next(iter(ds))
# shape verifications
assert kspace.shape.as_list() == expected_kspace_shape
assert shape.numpy()[0] == orig_shape
assert traj.shape.as_list() == [expected_kspace_shape[0], 2, 640 * (474//af)]
assert image.shape.as_list() == expected_kspace_shape[0:1] + [320, 320, 1]
def test_spiral_dataset(create_full_fastmri_test_tmp_dataset):
path = create_full_fastmri_test_tmp_dataset['fastmri_tmp_singlecoil_train']
ds = train_nc_kspace_dataset_from_indexable(
path,
image_size,
af=af
)
(kspace, traj, (shape,)), image = next(iter(ds))
# shape verifications
assert kspace.shape.as_list() == kspace_shape
assert shape.numpy()[0] == image_shape[-2]
assert traj.shape.as_list() == [kspace_shape[0], 2, 640 * (474//af)]
assert image.shape.as_list() == kspace_shape[0:1] + [320, 320, 1]
|
tests/unit/test_remove_script_semicolon.py | jtalmi/pre-commit-dbt | 153 | 12654907 | import io
import pytest
from pre_commit_dbt.check_script_semicolon import check_semicolon
from pre_commit_dbt.remove_script_semicolon import main
# Input, expected return value, expected output
TESTS = (
(b"foo\n", 0, b"foo\n"),
(b"", 0, b""),
(b"\n\n", 0, b"\n\n"),
(b"\n\n\n\n", 0, b"\n\n\n\n"),
(b"foo", 0, b"foo"),
(b"foo\n;", 1, b"foo\n"),
(b";", 1, b""),
(b";\n\n", 1, b""),
(b";\n\n\n\n", 1, b""),
(b"foo;", 1, b"foo"),
(b"\n\n\n\n;", 1, b"\n\n\n\n"),
(b"\r\r\r\r;", 1, b"\r\r\r\r"),
(b";foo\n", 0, b";foo\n"),
)
@pytest.mark.parametrize(("input_s", "expected_status_code", "output"), TESTS)
def test_fix_semicolon(input_s, expected_status_code, output):
file_obj = io.BytesIO(input_s)
status_code = check_semicolon(file_obj, replace=True)
assert file_obj.getvalue() == output
assert status_code == expected_status_code
def test_fix_semicolon_default():
file_obj = io.BytesIO(b";\n\n")
status_code = check_semicolon(file_obj)
assert file_obj.getvalue() == b";\n\n"
assert status_code == 1
@pytest.mark.parametrize(("input_s", "expected_status_code", "output"), TESTS)
def test_fix_semicolon_integration(input_s, expected_status_code, output, tmpdir):
path = tmpdir.join("file.txt")
path.write_binary(input_s)
status_code = main([str(path)])
file_output = path.read_binary()
assert file_output == output
assert status_code == expected_status_code
|
src/python/platforms/linux/lkl/constants.py | mi-ac/clusterfuzz | 5,023 | 12654913 | <reponame>mi-ac/clusterfuzz
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common constants for LKL."""
import re
LINUX_KERNEL_MODULE_STACK_TRACE = 'Linux Kernel Library Stack Trace:'
# hid-fuzzer: lib/posix-host.c:401: void panic(void): Assertion `0' failed.
LINUX_KERNEL_LIBRARY_ASSERT_REGEX = re.compile(
r'([^:]+): lib/posix-host\.c:\d+: void panic\(void\): Assertion .*')
# Linux version 5.4.58+-ab6926695 where 6926695 is the build id.
# Unlike in a normal linux version string, we do not know the build hash.
LINUX_VERSION_REGEX_LKL = re.compile(r'Linux version .+-(ab([0-9a-f]+)\s)')
# This is the prefix in the repo.prop for the kernel for all
# lkl fuzzers.
LKL_REPO_KERNEL_PREFIX = 'kernel/private/lkl'
LKL_BUILD_TARGET = 'kernel_kasan.lkl_fuzzers'
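if __name__ == '__main__':
  # minimal self-check of the regexes above; the sample lines are synthetic
  version_line = 'Linux version 5.4.58+-ab6926695 (build info)'
  match = LINUX_VERSION_REGEX_LKL.search(version_line)
  assert match and match.group(2) == '6926695'

  assert_line = ('hid-fuzzer: lib/posix-host.c:401: void panic(void): '
                 "Assertion `0' failed.")
  assert LINUX_KERNEL_LIBRARY_ASSERT_REGEX.match(assert_line)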
|
Projects/Healthcare/breast-cancer/src/heatmaps/models.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 3,266 | 12654945 | <filename>Projects/Healthcare/breast-cancer/src/heatmaps/models.py
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>
#
# This file is part of breast_cancer_classifier.
#
# breast_cancer_classifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# breast_cancer_classifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with breast_cancer_classifier. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
"""
Defines the heatmap generation model used in run_producer.py
"""
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import pad
import torchvision.models.densenet as densenet
class ModifiedDenseNet121(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.densenet = densenet.densenet121(*args, **kwargs)
self._is_modified = False
def _modify_densenet(self):
"""
Replace Conv2d and MaxPool2d to resolve the differences in padding
between TensorFlow and PyTorch
"""
assert not self._is_modified
for full_name, nn_module in self.densenet.named_modules():
if isinstance(nn_module, (nn.Conv2d, nn.MaxPool2d)):
module_name_parts = full_name.split(".")
parent = self._get_module(self.densenet, module_name_parts[:-1])
actual_module_name = module_name_parts[-1]
assert "conv" in module_name_parts[-1] or "pool" in module_name_parts[-1]
setattr(parent, actual_module_name, TFSamePadWrapper(nn_module))
self._is_modified = True
def load_from_path(self, model_path):
self.densenet.load_state_dict(torch.load(model_path))
self._modify_densenet()
def forward(self, x):
if not self._is_modified:
self._modify_densenet()
features = self.densenet.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
out = self.densenet.classifier(out)
return out
@classmethod
def _get_module(cls, model, module_name_parts):
obj = model
for module_name_part in module_name_parts:
obj = getattr(obj, module_name_part)
return obj
class TFSamePadWrapper(nn.Module):
"""
Outputs a new convolutional or pooling layer which uses TensorFlow-style "SAME" padding
"""
def __init__(self, sub_module):
super(TFSamePadWrapper, self).__init__()
self.sub_module = copy.deepcopy(sub_module)
self.sub_module.padding = 0
if isinstance(self.sub_module.kernel_size, int):
self.kernel_size = (self.sub_module.kernel_size, self.sub_module.kernel_size)
self.stride = (self.sub_module.stride, self.sub_module.stride)
else:
self.kernel_size = self.sub_module.kernel_size
self.stride = self.sub_module.stride
def forward(self, x):
return self.sub_module(self.apply_pad(x))
def apply_pad(self, x):
pad_height = self.calculate_padding(x.shape[2], self.kernel_size[0], self.stride[0])
pad_width = self.calculate_padding(x.shape[3], self.kernel_size[1], self.stride[1])
pad_top, pad_left = pad_height // 2, pad_width // 2
pad_bottom, pad_right = pad_height - pad_top, pad_width - pad_left
        # F.pad pads the last dimension first: (left, right, top, bottom)
        return pad(x, [pad_left, pad_right, pad_top, pad_bottom])
@classmethod
def calculate_padding(cls, in_dim, kernel_dim, stride_dim):
if in_dim % stride_dim == 0:
return max(0, kernel_dim - stride_dim)
return max(0, kernel_dim - (in_dim % stride_dim))
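# Worked example of the "SAME" padding arithmetic above (values illustrative):
# for in_dim=7, kernel_dim=3, stride_dim=2 we get 7 % 2 == 1, so the total pad
# is max(0, 3 - 1) = 2, split as pad_top=1 / pad_bottom=1, which reproduces
# TensorFlow's output size ceil(7 / 2) == 4.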
|
dev/Gems/StarterGame/Environment/Assets/renamer.py | brianherrera/lumberyard | 1,738 | 12654947 | <reponame>brianherrera/lumberyard
import os
import shutil
from os import path
def main():
for filename in os.listdir("."):
print(filename)
originalFilename = filename
filename = filename.lower()
filename = filename.replace("am_", "")
os.rename(originalFilename, filename)
if __name__ == "__main__":
main() |
Chapter 01/1.12.py | ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition | 120 | 12654950 | """
Code illustration: 1.12
A demonstration of tkinter styling
@Tkinter GUI Application Development Blueprints
"""
import tkinter as tk
root = tk.Tk()
root.configure(background='#4D4D4D') #top level styling
# connecting to the external styling optionDB.txt
root.option_readfile('optionDB.txt')
#widget specific styling
text = tk.Text(
root,
background='#101010',
foreground="#D6D6D6",
borderwidth=18,
relief='sunken',
width=17,
height=5)
text.insert(
tk.END,
"Style is knowing who you are,what you want to say, and not giving a damn."
)
text.grid(row=0, column=0, columnspan=6, padx=5, pady=5)
# all the below widgets derive their styling from optionDB.txt file
tk.Button(root, text='*').grid(row=1, column=1)
tk.Button(root, text='^').grid(row=1, column=2)
tk.Button(root, text='#').grid(row=1, column=3)
tk.Button(root, text='<').grid(row=2, column=1)
tk.Button(
root, text='OK', cursor='target').grid(
row=2, column=2) #changing cursor style
tk.Button(root, text='>').grid(row=2, column=3)
tk.Button(root, text='+').grid(row=3, column=1)
tk.Button(root, text='v').grid(row=3, column=2)
tk.Button(root, text='-').grid(row=3, column=3)
for i in range(10):
tk.Button(
root, text=str(i)).grid(
column=3 if i % 3 == 0 else (1 if i % 3 == 1 else 2),
row=4 if i <= 3 else (5 if i <= 6 else 6))
root.mainloop()
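# For reference, optionDB.txt uses the X resource format read by
# option_readfile; a minimal illustrative file (not necessarily the book's):
#
#   *Button*background: #101010
#   *Button*foreground: #D6D6D6
#   *Button*relief: raised
#   *Button*width: 3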
|
AutotestWebD/apps/user_center/views/user_uri_conf.py | yangjourney/sosotest | 422 | 12654960 | <gh_stars>100-1000
from apps.common.func.CommonFunc import *
from django.db.models import Max
from apps.common.func.LanguageFunc import *
from django.shortcuts import render, HttpResponse
from all_models.models import *
from urllib import parse
from apps.user_center.services.user_uriService import user_uriService
from apps.common.config import commonWebConfig
from apps.config.services.serviceConfService import ServiceConfService
from apps.config.services.http_confService import HttpConfService
from apps.config.services.uriService import UriService
from apps.common.decorator.permission_normal_funcitons import *
import json
def userUriCheck(request):
langDict = getLangTextDict(request)
context = {}
context["httpUserCenterURIConfPage"] = "current-page"
context["userName"] = request.session.get("userName")
if not isRelease:
context["env"] = "test"
    # page text
text = {}
text["pageTitle"] = langDict["web"]["httpUserCenterUserUriPageTitle"]
text["subPageTitle"] = langDict["web"]["httpUserCenterUserUriSubPageTitle"]
context["text"] = text
context["page"] = 1
context["uri"] = UriService.getUri(request, "ALL")
context["title"] = "服务配置"
return render(request, "InterfaceTest/user_center/user_uri_conf.html", context)
# def getUriData(request):
# id = request.GET.get("id")
# httpConfData = dbModelToDict(HttpConfService.getHttpConfForId(id))
# httpConfList = httpConfData["httpConf"].split("\n")
# result = []
# loop = 0
# for httpConfIndex in range (1,len(httpConfList)):
# if httpConfList[httpConfIndex] == "" or "=" not in httpConfList[httpConfIndex]:
# continue
#
# indexData = httpConfList[httpConfIndex].split("=")
# result.append({})
# result[loop]["httpConfKey"] = indexData[0].strip()
# result[loop]["httpConfValue"] = indexData[1].strip()
#
# loop += 1
# return HttpResponse(ApiReturn(body=result).toJson())
def queryUserUriConf(request):
page = request.POST.get("page",1)
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
execSql = "SELECT s.*,tb_user.userName,muser.userName modByName FROM tb_config_uri s LEFT JOIN tb_user ON s.addBy=tb_user.loginName LEFT JOIN tb_user muser ON s.modBy=muser.loginName " \
"LEFT JOIN (SELECT * FROM ( SELECT id ucid,uriKey uuUriKey,conflevel FROM tb_user_uri " \
"WHERE addBy= '%s' ) b LEFT JOIN (SELECT uriKey cuUriKey FROM tb_config_uri) a ON b.uuUrikey = a.cuUriKey) c ON s.uriKey = c.cuUriKey " \
"WHERE s.state = 1" % request.session.get("loginName")
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
if key == "addBy":
checkList.append("%%%s%%" % checkArr[key])
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and (s.addBy LIKE %s or tb_user.userName LIKE %s) """
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and s.%s """ % key
execSql += """ LIKE %s"""
execSql += """ order by c.conflevel is null,c.conflevel ASC,s.modTime desc"""
print(execSql)
context = pagination(execSql, checkList, page, commonWebConfig.userHttpConfPageNum,request=request)
context["uriServices"] = UriService.getUri(request)
context["dubboServices"] = dbModelListToListDict(TbConfigUri.objects.filter(state=1, protocol="DUBBO").order_by("level"))
response = render(request, "InterfaceTest/user_center/SubPages/user_uri_conf_sub_page.html", context)
return response
def addUserUriSort(request):
uriKey = request.POST.get("uriKey")
loginName = request.session.get("loginName")
userUriCount = user_uriService.queryUserUriCount(loginName)
if userUriCount == 0:
user_uriService.addUserUrl(loginName,uriKey,0)
return HttpResponse(ApiReturn().toJson())
else:
userCount = dbModelListToListDict(user_uriService.queryUserUriRepeat(loginName,uriKey))
editLevel = dbModelListToListDict(user_uriService.queryUserUri(loginName))
if len(userCount) == 0:
for i in range(0,len(editLevel)):
editLevel[i]["conflevel"] += 1
editLevel[i]["modTime"] = datetime.datetime.now()
user_uriService.updateLevel(editLevel[i])
user_uriService.addUserUrl(loginName,uriKey,0)
return HttpResponse(ApiReturn().toJson())
elif userCount[0]["conflevel"] != 0:
for i in range(0, len(editLevel)):
editLevel[i]["conflevel"] = i+1
editLevel[i]["modTime"] = datetime.datetime.now()
user_uriService.updateLevel(editLevel[i])
userCount[0]["conflevel"] = 0
userCount[0]["modTime"] = datetime.datetime.now()
user_uriService.updateLevel(userCount[0])
return HttpResponse(ApiReturn().toJson())
else:
return HttpResponse(ApiReturn(ApiReturn.CODE_EXCEPTION,"此配置已排在第一位").toJson())
@single_data_permission(TbConfigUri,TbConfigUri)
def addUserUriApply(request):
alias = request.POST.get("alias")
protocols = request.POST.get("protocols")
loginName = request.session.get("loginName")
failedProtocol = ""
for tmpProtocol in protocols.split(","):
if tmpProtocol.strip() != "":
try:
try:
id = TbConfigUri.objects.all().aggregate(Max('id'))["id__max"] + 1
except:
id = 1
tmpUriModel = TbConfigUri()
uriKey = "%s-%s" % (tmpProtocol.strip().lower(),alias)
protocol = tmpProtocol.strip()
if protocol == "HTTP":
uriAlias = alias
else:
uriAlias = "%s(%s)" % (alias,tmpProtocol.strip().lower())
oldData = TbConfigUri.objects.filter(uriKey=uriKey)
if len(oldData) > 0:
data = oldData[0]
data.state = 1
data.save()
else:
tmpUriModel.id = id
tmpUriModel.alias = uriAlias
tmpUriModel.uriDesc = "%s的%s服务" % (uriKey,tmpProtocol)
tmpUriModel.uriKey = uriKey
tmpUriModel.protocol = protocol
tmpUriModel.addBy = loginName
tmpUriModel.save(force_insert=True)
except Exception as e:
print(traceback.format_exc())
failedProtocol += tmpProtocol.strip()+" "
if failedProtocol == "":
return HttpResponse(ApiReturn(code=ApiReturn.CODE_OK,message="添加成功!").toJson())
else:
return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR,message="协议%s添加失败!" % failedProtocol).toJson())
@single_data_permission(TbConfigUri,TbConfigUri)
def saveUriEdit(request):
uriKey = request.POST.get("uriKey")
httpConfDesc = request.POST.get("httpConfDesc")
loginName = request.session.get("loginName")
TbConfigUri.objects.filter(uriKey=uriKey).update(uriDesc=httpConfDesc,modBy=loginName,modTime=get_current_time())
return HttpResponse(ApiReturn(code=ApiReturn.CODE_OK,message="修改成功!").toJson())
@single_data_permission(TbConfigUri,TbConfigUri)
def delUri(request):
uriKey = request.GET.get("uriKey")
loginName = request.session.get("loginName")
TbConfigUri.objects.filter(uriKey=uriKey).update(state=0,modBy=loginName,modTime=get_current_time())
return HttpResponse(ApiReturn(code=ApiReturn.CODE_OK,message="删除成功!").toJson())
def userEnvUriCheck(request):
langDict = getLangTextDict(request)
context = {}
context["httpUserCenterEnvURIConfPage"] = "current-page"
context["userName"] = request.session.get("userName")
if not isRelease:
context["env"] = "test"
    # page text
text = {}
text["pageTitle"] = "请求地址配置"
text["subPageTitle"] = "请求地址查看"
context["text"] = text
context["page"] = 1
context["envConfList"] = HttpConfService.getAllHttpConf(request)
context["uri"] = UriService.getUri(request,"ALL")
context["title"] = "请求地址配置"
return render(request, "InterfaceTest/user_center/user_env_uri_conf.html", context)
def queryUserEnvUriConf(request):
page = request.POST.get("page")
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
execSql = "SELECT s.*,tb_user.userName,curi.protocol protocol,muser.userName modByName FROM tb_env_uri_conf s LEFT JOIN tb_user ON s.addBy=tb_user.loginName LEFT JOIN tb_user muser ON s.modBy=muser.loginName " \
"LEFT JOIN tb_config_http chttp ON s.httpConfKey=chttp.httpConfKey " \
"LEFT JOIN tb_config_uri curi ON s.uriKey=curi.uriKey " \
"WHERE s.state = 1"
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
if key == "addBy":
checkList.append("%%%s%%" % checkArr[key])
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and (s.addBy LIKE %s or tb_user.userName LIKE %s) """
continue
if key == "protocol":
checkList.append("%s" % checkArr[key])
execSql += """ and curi.%s """ % key
execSql += """ = %s"""
continue
if key in ["httpConfKey","uriKey"]:
checkList.append("%s" % checkArr[key])
execSql += """ and s.%s """ % key
execSql += """ = %s"""
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and s.%s """ % key
execSql += """ LIKE %s"""
execSql += """ order by s.modTime DESC"""
context = pagination(execSql, checkList, page, commonWebConfig.userHttpConfPageNum,request=request)
# context["uriServices"] = UriService.getUri(request)
# context["dubboServices"] = dbModelListToListDict(TbConfigUri.objects.filter(state=1, protocol="DUBBO").order_by("level"))
response = render(request, "InterfaceTest/user_center/SubPages/env_uri_conf_sub_page.html", context)
return response
@single_data_permission(TbEnvUriConf,TbEnvUriConf)
def delEnvUri(request):
id = request.GET.get("id")
TbEnvUriConf.objects.filter(id=id).update(state=0)
return HttpResponse(ApiReturn().toJson())
@single_data_permission(TbEnvUriConf,TbEnvUriConf)
def saveEditEnvUri(request):
id = request.POST.get("id")
requestAddr = request.POST.get("requestAddr")
TbEnvUriConf.objects.filter(id=id).update(requestAddr=requestAddr,modBy=request.session.get("loginName"),state=1)
return HttpResponse(ApiReturn().toJson())
@single_data_permission(TbEnvUriConf,TbEnvUriConf)
def saveEnvUri(request):
requestAddr = request.POST.get("requestAddr")
httpConfKey = request.POST.get("httpConfKey")
uriKey = request.POST.get("uriKey")
envUri = TbEnvUriConf.objects.filter(httpConfKey = httpConfKey,uriKey=uriKey)
if(envUri):
if envUri[0].state == 1:
            # an active config already exists: report an error
return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR,message="已经存在的请求配置,不能重复添加,请编辑!").toJson())
elif envUri[0].state == 0:
            # a soft-deleted record exists: reactivate and update it
envUri[0].state = 1
envUri[0].requestAddr = requestAddr
envUri[0].addBy = request.session.get("loginName")
envUri[0].addTime = get_current_time()
envUri[0].save(force_update=True)
return HttpResponse(ApiReturn(message="添加成功!").toJson())
else:
        # no existing record: insert a new one
teuri = TbEnvUriConf()
teuri.requestAddr = requestAddr
teuri.httpConfKey = httpConfKey
teuri.uriKey = uriKey
teuri.addBy = request.session.get("loginName")
teuri.save(force_insert=True)
return HttpResponse(ApiReturn(message="添加成功!!").toJson())
def delAllUserUri(request):
TbUserUri.objects.filter(addBy=request.session.get("loginName")).delete()
return HttpResponse(ApiReturn().toJson()) |
tests/test_fiona.py | paultimothymooney/docker-python-2 | 2,030 | 12654974 | import unittest
import fiona
import pandas as pd
class TestFiona(unittest.TestCase):
def test_read(self):
with fiona.open("/input/tests/data/coutwildrnp.shp") as source:
self.assertEqual(67, len(source))
|
webmagic-scripts/src/main/resources/python/oschina.py | AngelPASTORROJAS/L3S6-GL-webmagic-PASTOR_ROJAS-BENAOUD | 11,052 | 12654988 | title=xpath("div[@class=BlogTitle]")
urls="http://my\\.oschina\\.net/flashsword/blog/\\d+"
result={"title":title,"urls":urls}
|
Chapter 07/responsive.py | ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition | 120 | 12655004 | <filename>Chapter 07/responsive.py
'''
Chapter 7
A demonstration of a responsive window
using Grid.rowconfigure and Grid.columnconfigure
'''
from tkinter import Tk, Button, Grid
root = Tk()
for x in range(10):
    btn = Button(root, text=x)
    btn.grid(column=x, row=1, sticky='nsew')
    # weight the occupied row and every column so the buttons actually
    # stretch when the window is resized
    Grid.rowconfigure(root, 1, weight=1)
    Grid.columnconfigure(root, x, weight=1)
root.mainloop()
|
tracker/test_utils.py | rcaudill/SkyScan | 223 | 12655037 | """Unit tests for utils.py"""
import pytest
from utils import bearing, calc_travel, coordinate_distance, deg2rad, elevation
def test_deg2rad():
"""Unit tests for deg2rad()."""
# Note: python's math package includes a radians function that
# converts degrees to radians. This function could be eliminated
# to reduce custom code.
assert deg2rad(57.2958) == 1.0000003575641672
assert deg2rad(1) == 0.017453292519943295
assert deg2rad(-1) == -0.017453292519943295
@pytest.mark.skip(reason="Insufficient documentation to test. No docstrings.")
def test_elevation():
"""Unit test for elevation()."""
pass
def test_bearing():
"""Unit test for bearing()."""
# Example from: https://www.igismap.com/formula-to-find-bearing-or-heading-angle-between-two-points-latitude-longitude/
lat1, long1 = 39.099912, -94.581213
lat2, long2 = 38.627089, -90.200203
expected_bearing = 96.51262423499941
assert bearing(lat1, long1, lat2, long2) == expected_bearing
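# For reference, the initial-bearing formula under test (per the cited page):
#   theta = atan2(sin(dlon) * cos(lat2),
#                 cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon))
# converted to degrees and normalized to [0, 360).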
def test_coordinate_distance():
"""Unit test for coordinate_distance()."""
# Used this app to calculate distance: https://www.movable-type.co.uk/scripts/latlong.html
lat1, long1 = 39.099912, -94.581213
lat2, long2 = 38.627089, -90.200203
expected_distance = 382900.05037560174
assert coordinate_distance(lat1, long1, lat2, long2) == expected_distance
@pytest.mark.skip(reason="Insufficient documentation to test. What is lead_s?")
def test_calc_travel():
"""Unit test for calc_travel()."""
# note: the code in calc_travel is hard to understand because of the tangle
# of calculations. consider reformatting and explaining or explore the possibility
# using geopy
pass
|
src/genie/libs/parser/iosxe/tests/ShowL2fibBdPort/cli/equal/golden_output1_expected.py | balmasea/genieparser | 204 | 12655051 | <gh_stars>100-1000
expected_output = {
'Et0/2:12': {
'type': 'BD_PORT',
'is_path_list': False,
'port': 'Et0/2:12'
},
'[IR]20012:2.2.2.2': {
'type':'VXLAN_REP',
'is_path_list': True,
'path_list': {
'id': 1191,
'path_count': 1,
'type': 'VXLAN_REP',
'description': '[IR]20012:2.2.2.2'
}
},
'[IR]20012:3.3.3.2': {
'type':'VXLAN_REP',
'is_path_list': True,
'path_list': {
'id': 1184,
'path_count': 1,
'type': 'VXLAN_REP',
'description': '[IR]20012:3.3.3.2'
}
}
} |
apischema/deserialization/flattened.py | wyfo/apimodel | 118 | 12655055 | from typing import Iterator, Mapping, Sequence, Type
from apischema.conversions.conversions import DefaultConversion
from apischema.conversions.visitor import DeserializationVisitor
from apischema.objects import ObjectField
from apischema.objects.visitor import DeserializationObjectVisitor
from apischema.types import AnyType
from apischema.utils import get_origin_or_type
from apischema.visitor import Unsupported
class InitFlattenedAliasVisitor(
DeserializationObjectVisitor[Iterator[str]], DeserializationVisitor[Iterator[str]]
):
def mapping(
self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType
) -> Iterator[str]:
yield from ()
def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> Iterator[str]:
for field in fields:
if field.flattened:
yield from get_deserialization_flattened_aliases(
get_origin_or_type(tp), field, self.default_conversion
)
elif not field.is_aggregate:
yield field.alias
def _visited_union(self, results: Sequence[Iterator[str]]) -> Iterator[str]:
if len(results) != 1:
raise NotImplementedError
return results[0]
def get_deserialization_flattened_aliases(
cls: Type, field: ObjectField, default_conversion: DefaultConversion
) -> Iterator[str]:
assert field.flattened
try:
yield from InitFlattenedAliasVisitor(default_conversion).visit_with_conv(
field.type, field.deserialization
)
except (NotImplementedError, Unsupported):
raise TypeError(
f"Flattened field {cls.__name__}.{field.name} must have an object type"
) from None
|
tests/test_api_bugs.py | lukasschwab/arxiv.py | 553 | 12655073 | <filename>tests/test_api_bugs.py
"""
Tests for work-arounds to known arXiv API bugs.
"""
import arxiv
import unittest
class TestClient(unittest.TestCase):
def test_missing_title(self):
"""
Papers with the title "0" do not have a title element in the Atom feed.
It's unclear whether other falsey titles (e.g. "False", "null", or empty
titles) are allowed by arXiv and are impacted by this bug. This may also
surface for other expected fields (e.g. author names).
+ GitHub issue: https://github.com/lukasschwab/arxiv.py/issues/71
+ Bug report: https://groups.google.com/u/1/g/arxiv-api/c/ORENISrc5gc
"""
paper_without_title = "2104.12255v1"
try:
results = list(arxiv.Search(id_list=[paper_without_title]).results())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].get_short_id(), paper_without_title)
except AttributeError:
self.fail("got AttributeError fetching paper without title")
|
examples/real_data/classification_test.py | mens-artis/Auto-PyTorch | 1,657 | 12655086 | <filename>examples/real_data/classification_test.py
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
import os, sys
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import logging
from autoPyTorch import AutoNetClassification
from autoPyTorch.data_management.data_manager import DataManager
dm = DataManager(verbose=1)
dataset_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'datasets'))
# choose between the 10 classification testcases on real data.
TEST_CASE = 4
if TEST_CASE == 1:
dm.read_data("openml:22", is_classification=True)
# 2000 samples, 10 classes, 48 features
if TEST_CASE == 2:
dm.read_data("openml:1476", is_classification=True)
# 13910 samples, 6 classes, 128 features
if TEST_CASE == 3:
dm.read_data("openml:1464", is_classification=True)
# 748 samples, 2 classes, 4 features
if TEST_CASE == 4:
dm.read_data("openml:31", is_classification=True)
if TEST_CASE == 5:
dm.read_data("openml:28", is_classification=True)
# 5620 samples, 10 classes, 65 features
if TEST_CASE == 6:
dm.read_data("openml:42", is_classification=True)
# 683 samples, 19 classes, 36 categorical features
if TEST_CASE == 7:
dm.read_data("openml:44", is_classification=True)
# 4601 samples, 2 classes, 58 features
if TEST_CASE == 8:
dm.read_data("openml:32", is_classification=True)
if TEST_CASE == 9:
dm.read_data("openml:334", is_classification=True)
if TEST_CASE == 10:
dm.read_data("openml:40996", is_classification=True)
autonet = AutoNetClassification(budget_type='epochs', min_budget=1, max_budget=9, num_iterations=1, log_level='info')
res = autonet.fit(X_train=dm.X_train,
Y_train=dm.Y_train,
early_stopping_patience=3,
# validation_split=0.3,
categorical_features=dm.categorical_features)
print(res)
|
analysis/control/type_util.py | leozz37/makani | 1,178 | 12655136 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for additional type-checking."""
import collections
import numpy as np
class MatrixShapeOrTypeException(Exception):
"""Exception indicating a non-matrix argument."""
pass
def CheckIsMatrix(arg, shape=None):
return isinstance(arg, np.matrix) and (shape is None or arg.shape == shape)
def RequireMatrixArguments(*shapes):
"""A decorator that ensures arguments are np.matrix objects of given shapes.
Args:
*shapes: A list whose elements are either None or two element tuples (n, m).
There should be one element per argument to the decorated function. The
non-None arguments will be required to be n-by-m numpy.matrix objects.
Returns:
Decorator.
"""
def _CheckArguments(f):
assert len(shapes) == f.func_code.co_argcount
def _Wrapped(*args, **kwargs):
for (arg, shape) in zip(args, shapes):
if shape is not None and not CheckIsMatrix(arg, shape=shape):
raise MatrixShapeOrTypeException(shape)
return f(*args, **kwargs)
return _Wrapped
return _CheckArguments
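# A minimal usage sketch for RequireMatrixArguments; the function and shapes
# below are hypothetical, not part of this module:
#
#   @RequireMatrixArguments((3, 3), None, (3, 1))
#   def Rotate(dcm, label, vec):
#     return dcm * vec
#
# Passing a dcm that is not a 3-by-3 np.matrix (or a vec that is not 3-by-1)
# raises MatrixShapeOrTypeException; the None slot skips checking for label.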
def MakeNamedVectorClass(name, field_indices):
"""Generate a class for handling vectors with named sub-components.
Returns a class which extends collections.namedtuple so that each
element is a "slice" of a dim-by-1 np.matrix. The field_indices
argument is a list of pairs. The first entry of each pair is the
field name, the second entry is a list of vector indices, e.g.:
  FooClass = MakeNamedVectorClass('Foo', [('r', [0]), ('i', [1, 2, 3])])
  foo_instance = FooClass(r=np.matrix([[1.0]]), i=np.matrix([[2.0], [3.0], [4.0]]))
  Here, the total dimension is 4, and foo_instance.ToVector() will be
  np.matrix([[1.0], [2.0], [3.0], [4.0]]).
Args:
name: Name to give the class.
field_indices: List of tuples defining the class as above.
Returns:
Named vector class defined as above.
"""
keys = [key for (key, _) in field_indices]
indices = [index for (_, index) in field_indices]
all_indices = []
for index in indices:
all_indices += index
dim = len(all_indices)
assert set(all_indices) == set(range(dim))
tuple_type = collections.namedtuple(name + 'Repr', keys)
class NamedVector(tuple_type):
"""Class representing a dim-by-1 np.matrix with named slices."""
def __init__(self, *args, **kwargs):
indices_dict = {key: index for (key, index) in field_indices}
for (key, value) in kwargs.iteritems():
if not CheckIsMatrix(value, shape=(len(indices_dict[key]), 1)):
raise MatrixShapeOrTypeException((key, value))
super(NamedVector, self).__init__(*args, **kwargs)
def ToVector(self):
"""Return the dim-by-1 np.matrix combining the named component vectors."""
vector = np.matrix(np.zeros((dim, 1)))
for i, index in enumerate(indices):
vector[index] = self[i]
return vector
@classmethod
@RequireMatrixArguments(None, (dim, 1))
def FromVector(cls, vector):
"""Inverse of ToVector()."""
values = [None for _ in keys]
for i, index in enumerate(indices):
values[i] = vector[index]
return cls(*values)
@classmethod
def GetIndices(cls):
"""Get a namedtuple whose elements are the component indices."""
return tuple_type(*indices)
@classmethod
def GetDim(cls):
return dim
@classmethod
def StepVector(cls, step_sizes):
"""Maps a {field_name: step_size} dict to a vector of step sizes."""
step_vector = np.matrix(np.zeros((cls.GetDim(), 1)))
indices = cls.GetIndices()
for field_name, size in step_sizes.iteritems():
step_vector[getattr(indices, field_name), 0] = size
      assert (step_vector > 0.0).all()
return step_vector
return NamedVector
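# A hedged illustration of StepVector, reusing the FooClass example from the
# docstring above (the step sizes are ours):
#
#   FooClass.StepVector({'r': 0.1, 'i': 0.5})
#   # -> np.matrix([[0.1], [0.5], [0.5], [0.5]]); each field's step size is
#   #    broadcast to all of that field's indices.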
def MakeStateClass(name, field_indices):
"""Creates a class for representing system state.
Generates a class for representing the state of a system where some
components of the state lie on manifolds such as SO(3). This
involves constructing two classes. The first is a class that
behaves like a namedtuple with each entry being a component of the
state. The second class behaves like a NamedVector and represents a
tangent vector for this space. The user must make a subclass of
this StateClass returned by this method to handle moving states
along tangent directions and recovering tangent directions from
pairs of states. An example is given below:
class AttitudeState(MakeStateClass(
      'AttitudeState', [('omega', range(0, 3)),
('dcm_g2b', range(3, 6))])):
def Increment(self, tangent, step=1.0):
...
def Decrement(self, other_state):
...
state = AttitudeState(omega=np.matrix(np.zeros((3, 1))),
dcm_g2b=np.matrix(np.eye(3)))
tangent = AttitudeState.Tangent(domega=np.matrix([[1.0], [2.0], [3.0]]),
ddcm_g2b=np.matrix([[4.0], [5.0], [6.0]]))
# This is equivalent to np.matrix([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]).
tangent.ToVector()
The structure of the state is given by field_indices which is a list
of pairs (field_name, tangent_indices). The string field_name gives
a name to this component of the state.
The Tangent class is a NamedVector with fields named 'd' +
field_name which are stored in the tangent_indices components of the
vector.
Args:
name: Name of the class to create.
field_indices: List of pairs (field_name, tangent_indices) describing
the structure of the class to create.
Returns:
A new class as described above.
"""
keys = [key for (key, _) in field_indices]
class StateClass(collections.namedtuple(name, keys)):
"""Class representing the state of a system."""
Tangent = MakeNamedVectorClass( # pylint: disable=invalid-name
name + 'Tangent',
[('d' + key, value) for (key, value) in field_indices])
def Increment(self, tangent, step=1.0):
raise NotImplementedError
def Difference(self, other_state):
raise NotImplementedError
return StateClass
def MakeFlatStateClass(name, field_indices):
"""Creates a class for representing system state in R^n.
Generates a class for representing the state of a system where
the Tangent vectors can be defined by element-wise addition
and subtraction of the states.
Args:
name: See MakeStateClass.
field_indices: See MakeStateClass.
Returns:
A new class as described above.
"""
class FlatStateClass(MakeStateClass(name, field_indices)):
"""StateClass representing a state in R^n."""
def Increment(self, tangent, step=1.0):
assert isinstance(tangent, FlatStateClass.Tangent)
return FlatStateClass(
*[value + step * tangent_value
for (value, tangent_value) in zip(self, tangent)])
def Difference(self, other_state):
return FlatStateClass.Tangent(
*[other_value - value
for (other_value, value) in zip(other_state, self)])
return FlatStateClass
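# A minimal sketch of a flat state; the Position class and its fields are
# hypothetical, not part of this module:
#
#   Position = MakeFlatStateClass('Position', [('x', [0]), ('v', [1])])
#   state = Position(x=np.matrix([[0.0]]), v=np.matrix([[1.0]]))
#   step = Position.Tangent(dx=np.matrix([[0.5]]), dv=np.matrix([[0.0]]))
#   state.Increment(step)  # Position(x=matrix([[0.5]]), v=matrix([[1.0]]))
#   state.Difference(state.Increment(step)).ToVector()  # [[0.5], [0.0]]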
|
scripts/generate_mapped_file.py | abhim00/pileup.js | 281 | 12655148 | <gh_stars>100-1000
#!/usr/bin/env python
'''
This script generates mapped files for use with test/MappedRemotedFile.js.
Usage:
    $ ./generate_mapped_file.py http://path/to/file.txt <<END
    [[0, 1234], [5678, 6789]]
    END
...
Wrote file.mapped.txt
Use with:
new MappedRemoteFile('file.mapped.txt', [
[0, 1234],
[5678, 6789]
]);
'''
import fileinput
import json
import os
import requests
import urlparse
import sys
_, url = sys.argv
ranges = json.load(sys.stdin)
ranges.sort(key=lambda x: x[0])
# TODO: coalesce ranges instead of failing
for r1, r2 in zip(ranges[:-1], ranges[1:]):
assert r1[1] < r2[0]
outfile = os.path.basename(urlparse.urlparse(url).path) + '.mapped'
with open(outfile, 'wb') as out:
total_bytes = 0
for start, stop in ranges:
headers = {'Range': 'bytes=%s-%s' % (start, stop)}
result = requests.get(url, headers=headers).content
total_bytes += len(result)
out.write(result)
print '''Wrote %d bytes to %s
Use with:
new MappedRemoteFile('%s', %s)
''' % (total_bytes, outfile, outfile, json.dumps(ranges))
|
seleniumwire/thirdparty/mitmproxy/net/http/http1/__init__.py | KozminMoci/selenium-wire | 975 | 12655160 | <filename>seleniumwire/thirdparty/mitmproxy/net/http/http1/__init__.py
from .assemble import assemble_body, assemble_request, assemble_request_head, assemble_response, assemble_response_head
from .read import (connection_close, expected_http_body_size, read_body, read_request, read_request_head,
read_response, read_response_head)
__all__ = [
"read_request", "read_request_head",
"read_response", "read_response_head",
"read_body",
"connection_close",
"expected_http_body_size",
"assemble_request", "assemble_request_head",
"assemble_response", "assemble_response_head",
"assemble_body",
]
|
scripts/__init__.py | owid/co2-data | 245 | 12655179 | import os
CURRENT_DIR = os.path.dirname(__file__)
INPUT_DIR = os.path.join(CURRENT_DIR, "input")
OUTPUT_DIR = os.path.join(CURRENT_DIR, "..")
|
macarico/lts/behavioral_cloning.py | bgalbraith/macarico | 121 | 12655207 | from __future__ import division, generators, print_function
import macarico
class BehavioralCloning(macarico.Learner):
def __init__(self, policy, reference):
macarico.Learner.__init__(self)
assert isinstance(policy, macarico.CostSensitivePolicy)
self.policy = policy
self.reference = reference
self.objective = 0.0
def forward(self, state):
ref = self.reference(state)
self.objective += self.policy.update(state, ref)
return ref
def get_objective(self, _):
ret = self.objective
self.objective = 0.0
return ret
|
DQM/HcalTasks/python/TestTask.py | ckamtsikis/cmssw | 852 | 12655224 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
testTask = cms.EDAnalyzer(
"TestTask",
# standard
name = cms.untracked.string("TestTask"),
debug = cms.untracked.int32(0),
runkeyVal = cms.untracked.int32(0),
runkeyName = cms.untracked.string("pp_run"),
tagHF = cms.untracked.InputTag("qie10Digis")
)
|
training_dataset/yt_bb/visual.py | Anonymous502/siamfda-for-eccv | 4,318 | 12655240 | import glob
import pandas as pd
import numpy as np
import cv2
visual = True
col_names = ['youtube_id', 'timestamp_ms', 'class_id', 'class_name',
'object_id', 'object_presence', 'xmin', 'xmax', 'ymin', 'ymax']
df = pd.read_csv('yt_bb_detection_validation.csv', header=None, index_col=False)
df.columns = col_names
frame_num = len(df['youtube_id'])
img_path = glob.glob('/mnt/qwang/youtubebb/frames/val*/*/*.jpg')
d = {key.split('/')[-1]: value for (value, key) in enumerate(img_path)}
for n in range(frame_num):
if df['object_presence'][n]:
frame_name = df['youtube_id'][n] + '_' + str(df['timestamp_ms'][n]) + '_' + \
str(df['class_id'][n]) + '_' + str(df['object_id'][n]) + '.jpg'
bbox = np.array([df['xmin'][n],df['ymin'][n],df['xmax'][n],df['ymax'][n]])
        if frame_name in d:
frame_path = img_path[d[frame_name]]
if visual:
im = cv2.imread(frame_path)
h, w, _ = im.shape
pt1 = (int(bbox[0]*w), int(bbox[1]*h))
pt2 = (int(bbox[2]*w), int(bbox[3]*h))
cv2.rectangle(im, pt1, pt2, (0, 255, 0), 2)
cv2.imshow('img', im)
cv2.waitKey(100)
        else:
            print('no image: {}'.format(frame_name))
print('done')
|
packages/pyright-internal/src/tests/samples/dataclassTransform4.py | Microsoft/pyright | 3,934 | 12655243 | # This sample tests the case where a field descriptor has an implicit
# "init" parameter type based on an overload.
from typing import (
Any,
Callable,
Literal,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
T = TypeVar("T")
class ModelField:
def __init__(
self,
*,
default: Optional[Any] = ...,
init: Optional[bool] = True,
**kwargs: Any,
) -> None:
...
@overload
def field(
*,
default: Optional[str] = None,
resolver: Callable[[], Any],
init: Literal[False] = False,
) -> Any:
...
@overload
def field(
*,
default: Optional[str] = None,
resolver: None = None,
init: Literal[True] = True,
) -> Any:
...
def field(
*,
default: Optional[str] = None,
resolver: Optional[Callable[[], Any]] = None,
init: bool = True,
) -> Any:
...
def __dataclass_transform__(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_specifiers: Tuple[Union[type, Callable[..., Any]], ...] = (()),
) -> Callable[[T], T]:
# If used within a stub file, the following implementation can be
# replaced with "...".
return lambda a: a
@__dataclass_transform__(kw_only_default=True, field_specifiers=(field,))
def create_model(*, init: bool = True) -> Callable[[Type[T]], Type[T]]:
...
@create_model()
class CustomerModel:
id: int = field(resolver=lambda: 0)
name: str = field(default="Voldemort")
CustomerModel()
CustomerModel(name="hi")
# This should generate an error because "id" is not
# supposed to be part of the init function.
CustomerModel(id=1, name="hi")
|
optimus/engines/cudf/io/save.py | ironmussa/Optimus | 1,045 | 12655247 | <reponame>ironmussa/Optimus<filename>optimus/engines/cudf/io/save.py
import warnings
import pandavro as pdx
from optimus.engines.base.io.save import BaseSave
from optimus.helpers.logger import logger
from optimus.helpers.types import *
class Save(BaseSave):
def __init__(self, root: 'DataFrameType'):
self.root = root
def json(self, path, mode="w", *args, **kwargs):
df = self.root.data
try:
df.to_json(path, mode=mode, *args, **kwargs)
except IOError as e:
logger.print(e)
raise
def csv(self, path, mode="rb", *args, **kwargs):
try:
dfd = self.root.cols.cast("*", "str").data
dfd.to_csv(path, index=False, mode=mode, *args, **kwargs)
except IOError as error:
logger.print(error)
raise
def parquet(self, path, mode="overwrite", num_partitions=1, *args, **kwargs):
# This character are invalid as column names by parquet
invalid_character = [" ", ",", ";", "{", "}", "(", ")", "\n", "\t", "="]
def func(col_name):
for i in invalid_character:
col_name = col_name.replace(i, "_")
return col_name
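        # e.g. a hypothetical column named "col name(1)" is renamed to
        # "col_name_1_" before writing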
df = self.root.cols.rename(func)
try:
            df.data.to_parquet(path, mode=mode, numpartitions=num_partitions)
except IOError as e:
logger.print(e)
raise
def excel(self, path, **kwargs):
try:
# df = self.root.data
# columns = parse_columns(self, "*",
# filter_by_column_types=["date", "array", "vector", "binary", "null"])
df = self.root.cols.cast("*", "str")
# Dask reference
# https://docs.dask.org/en/latest/dataframe-api.html#dask.dataframe.to_csv
df.to_pandas().to_excel(path, index=False)
except IOError as error:
logger.print(error)
raise
def orc(self, path, **kwargs):
try:
df = self.root.data
df.to_orc(path, index=False, **kwargs)
except IOError as error:
logger.print(error)
raise
def avro(self, path, **kwargs):
warnings.warn("Using CPU via pandavro to read the avro dataset")
pdx.to_avro(path, self.root.to_pandas())
|
notebook/str_swap.py | vhn0912/python-snippets | 174 | 12655251 | s = 'one two one two one'
print(s.replace('one', 'two').replace('two', 'one'))
# one one one one one
print(s.replace('one', 'X').replace('two', 'one').replace('X', 'two'))
# two one two one two
def swap_str(s_org, s1, s2, temp='*q@w-e~r^'):
return s_org.replace(s1, temp).replace(s2, s1).replace(temp, s2)
print(swap_str(s, 'one', 'two'))
# two one two one two
print(s.replace('o', 't').replace('t', 'o'))
# one owo one owo one
print(s.translate(str.maketrans({'o': 't', 't': 'o'})))
# tne owt tne owt tne
print(s.translate(str.maketrans('ot', 'to')))
# tne owt tne owt tne
|
pycoin/coins/bitcoin/SolutionChecker.py | jaschadub/pycoin | 1,210 | 12655252 | <gh_stars>1000+
from .ScriptTools import BitcoinScriptTools
from .VM import BitcoinVM
from ...encoding.bytes32 import from_bytes_32
from pycoin.satoshi import errno
from pycoin.satoshi.flags import (
SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY,
VERIFY_P2SH, VERIFY_SIGPUSHONLY, VERIFY_CLEANSTACK,
VERIFY_WITNESS, VERIFY_MINIMALIF, VERIFY_WITNESS_PUBKEYTYPE
)
from .SegwitChecker import SegwitChecker
from .P2SChecker import P2SChecker
class TxContext(object):
pass
class BitcoinSolutionChecker(SegwitChecker, P2SChecker):
VM = BitcoinVM
ScriptTools = BitcoinScriptTools
DEFAULT_FLAGS = VERIFY_P2SH | VERIFY_WITNESS
def __init__(self, tx):
self.tx = tx
# self.sighash_cache = {}
def _delete_signature(self, script, sig_blob):
"""
Returns a script with the given subscript removed. The subscript
must appear in the main script aligned to opcode boundaries for it
to be removed.
"""
subscript = self.ScriptTools.compile_push_data_list([sig_blob])
new_script = bytearray()
pc = 0
for opcode, data, pc, new_pc in self.ScriptTools.get_opcodes(script):
section = script[pc:new_pc]
if section != subscript:
new_script.extend(section)
return bytes(new_script)
def _make_sighash_f(self, tx_in_idx):
def sig_for_hash_type_f(hash_type, sig_blobs, vm):
script = vm.script[vm.begin_code_hash:]
for sig_blob in sig_blobs:
script = self._delete_signature(script, sig_blob)
return self._signature_hash(script, tx_in_idx, hash_type)
return sig_for_hash_type_f
def _solution_script_to_stack(self, tx_context, flags, traceback_f):
if flags & VERIFY_SIGPUSHONLY:
self._check_script_push_only(tx_context.solution_script)
# never use VERIFY_MINIMALIF or VERIFY_WITNESS_PUBKEYTYPE except in segwit
f1 = flags & ~(VERIFY_MINIMALIF | VERIFY_WITNESS_PUBKEYTYPE)
vm = self.VM(tx_context.solution_script, tx_context, self._make_sighash_f(tx_context.tx_in_idx), f1)
vm.is_solution_script = True
vm.traceback_f = traceback_f
solution_stack = vm.eval_script()
return solution_stack
def _check_script_push_only(self, script):
scriptStreamer = self.VM.ScriptStreamer
pc = 0
while pc < len(script):
opcode, data, pc, is_ok = scriptStreamer.get_opcode(script, pc)
if opcode not in scriptStreamer.data_opcodes:
raise self.ScriptError("signature has non-push opcodes", errno.SIG_PUSHONLY)
def _tx_in_for_idx(self, idx, tx_in, tx_out_script, unsigned_txs_out_idx):
if idx == unsigned_txs_out_idx:
return self.tx.TxIn(tx_in.previous_hash, tx_in.previous_index, tx_out_script, tx_in.sequence)
return self.tx.TxIn(tx_in.previous_hash, tx_in.previous_index, b'', tx_in.sequence)
@classmethod
def delete_subscript(class_, script, subscript):
"""
Returns a script with the given subscript removed. The subscript
must appear in the main script aligned to opcode boundaries for it
to be removed.
"""
new_script = bytearray()
pc = 0
for opcode, data, pc, new_pc in class_.ScriptTools.get_opcodes(script):
section = script[pc:new_pc]
if section != subscript:
new_script.extend(section)
return bytes(new_script)
def _signature_hash(self, tx_out_script, unsigned_txs_out_idx, hash_type):
"""
Return the canonical hash for a transaction. We need to
remove references to the signature, since it's a signature
of the hash before the signature is applied.
:param tx_out_script: the script the coins for unsigned_txs_out_idx are coming from
:param unsigned_txs_out_idx: where to put the tx_out_script
:param hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL,
optionally bitwise or'ed with SIGHASH_ANYONECANPAY
"""
# In case concatenating two scripts ends up with two codeseparators,
# or an extra one at the end, this prevents all those possible incompatibilities.
tx_out_script = self.delete_subscript(tx_out_script, self.ScriptTools.compile("OP_CODESEPARATOR"))
# blank out other inputs' signatures
txs_in = [self._tx_in_for_idx(i, tx_in, tx_out_script, unsigned_txs_out_idx)
for i, tx_in in enumerate(self.tx.txs_in)]
txs_out = self.tx.txs_out
# Blank out some of the outputs
if (hash_type & 0x1f) == SIGHASH_NONE:
# Wildcard payee
txs_out = []
# Let the others update at will
for i in range(len(txs_in)):
if i != unsigned_txs_out_idx:
txs_in[i].sequence = 0
elif (hash_type & 0x1f) == SIGHASH_SINGLE:
# This preserves the ability to validate existing legacy
# transactions which followed a buggy path in Satoshi's
# original code.
if unsigned_txs_out_idx >= len(txs_out):
# This should probably be moved to a constant, but the
# likelihood of ever getting here is already really small
# and getting smaller
return (1 << 248)
# Only lock in the txout payee at same index as txin; delete
# any outputs after this one and set all outputs before this
# one to "null" (where "null" means an empty script and a
# value of -1)
txs_out = [self.tx.TxOut(0xffffffffffffffff, b'')] * unsigned_txs_out_idx
txs_out.append(self.tx.txs_out[unsigned_txs_out_idx])
# Let the others update at will
for i in range(len(txs_in)):
if i != unsigned_txs_out_idx:
txs_in[i].sequence = 0
# Blank out other inputs completely, not recommended for open transactions
if hash_type & SIGHASH_ANYONECANPAY:
txs_in = [txs_in[unsigned_txs_out_idx]]
tmp_tx = self.tx.__class__(self.tx.version, txs_in, txs_out, self.tx.lock_time)
return from_bytes_32(tmp_tx.hash(hash_type=hash_type))
def tx_context_for_idx(self, tx_in_idx):
"""
solution_script: alleged solution to the puzzle_script
puzzle_script: the script protecting the coins
"""
tx_in = self.tx.txs_in[tx_in_idx]
tx_context = TxContext()
tx_context.lock_time = self.tx.lock_time
tx_context.version = self.tx.version
tx_context.puzzle_script = b'' if self.tx.missing_unspent(tx_in_idx) else self.tx.unspents[tx_in_idx].script
tx_context.solution_script = tx_in.script
tx_context.witness_solution_stack = tx_in.witness
tx_context.sequence = tx_in.sequence
tx_context.tx_in_idx = tx_in_idx
return tx_context
def check_solution(self, tx_context, flags=None, traceback_f=None):
"""
tx_context: information about the transaction that the VM may need
flags: gives the VM hints about which additional constraints to check
"""
for t in self.puzzle_and_solution_iterator(tx_context, flags=flags, traceback_f=traceback_f):
puzzle_script, solution_stack, flags, sighash_f = t
vm = self.VM(puzzle_script, tx_context, sighash_f, flags=flags, initial_stack=solution_stack[:])
vm.is_solution_script = False
vm.traceback_f = traceback_f
stack = vm.eval_script()
if len(stack) == 0 or not vm.bool_from_script_bytes(stack[-1]):
raise self.ScriptError("eval false", errno.EVAL_FALSE)
if flags & VERIFY_CLEANSTACK and len(stack) != 1:
raise self.ScriptError("stack not clean after evaluation", errno.CLEANSTACK)
def puzzle_and_solution_iterator(self, tx_context, flags=None, traceback_f=None):
if flags is None:
flags = self.DEFAULT_FLAGS
solution_stack = self._solution_script_to_stack(tx_context, flags=flags, traceback_f=traceback_f)
puzzle_script = tx_context.puzzle_script
flags_1 = flags & ~(VERIFY_MINIMALIF | VERIFY_WITNESS_PUBKEYTYPE)
sighash_f = self._make_sighash_f(tx_context.tx_in_idx)
yield puzzle_script, solution_stack, flags_1, sighash_f
p2sh_tuple = self.p2s_program_tuple(tx_context, puzzle_script, solution_stack, flags_1, sighash_f)
if p2sh_tuple:
yield p2sh_tuple
puzzle_script, solution_stack = p2sh_tuple[:2]
is_p2sh = p2sh_tuple is not None
witness_tuple = self.witness_program_tuple(tx_context, puzzle_script, solution_stack, flags, is_p2sh)
if witness_tuple:
yield witness_tuple
|
app/model/base.py | snowdensb/braindump | 631 | 12655270 | <filename>app/model/base.py
from datetime import datetime
from app import db
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
|
hls4ml/backends/vivado/passes/garnet_templates.py | jaemyungkim/hls4ml | 380 | 12655276 | <filename>hls4ml/backends/vivado/passes/garnet_templates.py
import numpy as np
from hls4ml.model.types import FixedPrecisionType
from hls4ml.backends.fpga.fpga_types import APTypeConverter
from hls4ml.model.layers import GarNet, GarNetStack
from hls4ml.backends.template import LayerConfigTemplate, FunctionCallTemplate
# GarNet templates
garnet_common_config_template = """
static const unsigned n_vertices = {n_vertices};
static const unsigned n_vertices_width = {n_vertices_width};
static const unsigned n_in_features = {n_in_features};
static const unsigned distance_width = {distance_width};
static const unsigned output_collapse = {collapse_type};
static const bool mean_by_nvert = {mean_by_nvert};
typedef {norm_t} norm_t;
typedef ap_fixed<{distance_width}, {distance_nint}, AP_TRN, AP_SAT> distance_t;
typedef {edge_weight_t} edge_weight_t;
typedef {edge_weight_aggr_t} edge_weight_aggr_t;
typedef {aggr_t} aggr_t;
typedef {output_t} output_t;
static const unsigned reuse_factor = {reuse};
static const unsigned log2_reuse_factor = {log2_reuse};
"""
garnet_config_template = """struct config{index} : nnet::garnet_config {{"""
garnet_config_template += garnet_common_config_template
garnet_config_template += """
static const unsigned n_propagate = {n_propagate};
static const unsigned n_aggregators = {n_aggregators};
static const unsigned n_out_features = {n_out_features};
typedef {input_transform_weights_t} input_transform_weights_t;
typedef {input_transform_biases_t} input_transform_biases_t;
typedef {aggregator_distance_weights_t} aggregator_distance_weights_t;
typedef {aggregator_distance_biases_t} aggregator_distance_biases_t;
typedef {output_transform_weights_t} output_transform_weights_t;
typedef {output_transform_biases_t} output_transform_biases_t;
static const input_transform_weights_t (&input_transform_weights)[{input_transform_weights_size}];
static const input_transform_biases_t (&input_transform_biases)[{input_transform_biases_size}];
static const aggregator_distance_weights_t (&aggregator_distance_weights)[{aggregator_distance_weights_size}];
static const aggregator_distance_biases_t (&aggregator_distance_biases)[{aggregator_distance_biases_size}];
static const output_transform_weights_t (&output_transform_weights)[{output_transform_weights_size}];
static const output_transform_biases_t (&output_transform_biases)[{output_transform_biases_size}];
typedef config{index} base_t;
}};
const config{index}::input_transform_weights_t (&config{index}::input_transform_weights)[{input_transform_weights_size}] = {input_transform_weights};
const config{index}::input_transform_biases_t (&config{index}::input_transform_biases)[{input_transform_biases_size}] = {input_transform_biases};
const config{index}::aggregator_distance_weights_t (&config{index}::aggregator_distance_weights)[{aggregator_distance_weights_size}] = {aggregator_distance_weights};
const config{index}::aggregator_distance_biases_t (&config{index}::aggregator_distance_biases)[{aggregator_distance_biases_size}] = {aggregator_distance_biases};
const config{index}::output_transform_weights_t (&config{index}::output_transform_weights)[{output_transform_weights_size}] = {output_transform_weights};
const config{index}::output_transform_biases_t (&config{index}::output_transform_biases)[{output_transform_biases_size}] = {output_transform_biases};
"""
garnet_function_template = 'nnet::garnet{impl}<{input_t}, {integer_input_t}, {output_t}, {config}>({input}, {nvtx}, {output});'
garnet_include_list = ['nnet_utils/nnet_garnet.h']
class GarNetConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(GarNet)
self.template = (garnet_config_template, )
def get_transforms_config(self, node, params):
params['n_in_features'] = node.attributes['n_in_features']
params['n_propagate'] = node.attributes['n_propagate']
params['n_aggregators'] = node.get_weights('aggregator_distance_biases').shape[0]
params['n_out_features'] = node.get_weights('output_transform_biases').shape[0]
for wname, weights in node.weights.items():
params[wname] = weights.name
params['{}_t'.format(wname)] = weights.type.name
params['{}_size'.format(wname)] = weights.data_length
def format(self, node):
params = self._default_config_params(node)
params['n_vertices'] = node.attributes['n_vertices']
params['n_vertices_width'] = int(np.log2(params['n_vertices']))
params['distance_width'] = 12
params['distance_nint'] = min(4, params['distance_width'] - 6) # this is tuned
params['log2_reuse'] = int(np.log2(params['reuse']))
## Define default precisions for various internal arrays (can be overridden from the config file)
        # We always give 10 bits for the fractional part
fwidth = 10
# Integral precision for aggr_t depends on how large the temporary sum for weighed feature mean will be
aggr_intw = max(params['log2_reuse'], params['n_vertices_width'] - params['log2_reuse']) + 3 # safety factor 2**3
aggr_w = aggr_intw + fwidth
# edge_weight_aggr_t does not need the safety factor
ew_aggr_intw = aggr_intw - 3
ew_aggr_w = ew_aggr_intw + fwidth
# Integral precision for norm is fixed to 4
norm_intw = 4
norm_w = norm_intw + fwidth
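        # Worked example with hypothetical values n_vertices = 128 and
        # reuse = 2: n_vertices_width = 7 and log2_reuse = 1, so
        # aggr_intw = max(1, 7 - 1) + 3 = 9, aggr_w = 9 + 10 = 19,
        # ew_aggr_intw = 6, ew_aggr_w = 16, and norm_w = 4 + 10 = 14.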
vspecs = [
('edge_weight', FixedPrecisionType(10, 0, signed=False)),
('edge_weight_aggr', FixedPrecisionType(ew_aggr_w, ew_aggr_intw, signed=False)),
('aggr', FixedPrecisionType(aggr_w, aggr_intw)),
('norm', FixedPrecisionType(norm_w, norm_intw, signed=False))
]
precision_converter = APTypeConverter()
for vname, default_precision in vspecs:
params['{}_t'.format(vname)], type_name = node.model.config.get_precision(node, var=vname)
if type_name.endswith('default_t'):
params['{}_t'.format(vname)] = precision_converter.convert(default_precision).definition_cpp()
params['output_t'] = node.get_output_variable().type.name
if node.attributes['collapse'] in ['mean', 'max']:
params['collapse_type'] = 'collapse_{}'.format(node.attributes['collapse'])
else:
params['collapse_type'] = 'no_collapse'
params['mean_by_nvert'] = str(node.attributes['mean_by_nvert']).lower()
self.get_transforms_config(node, params)
return self.template[0].format(**params)
class GarNetFunctionTemplate(FunctionCallTemplate):
def __init__(self):
super().__init__(GarNet, include_header=garnet_include_list)
self.template = garnet_function_template
def format(self, node):
params = self._default_function_params(node)
data = node.get_input_variable(node.inputs[0])
integer_input = node.get_input_variable(node.inputs[1])
params['input_t'] = data.type.name
params['input'] = data.name
params['integer_input_t'] = integer_input.type.name
params['nvtx'] = integer_input.name
if node.ref_impl:
params['impl'] = '_ref'
else:
params['impl'] = ''
return self.template.format(**params)
# GarNetStack Templates
garnet_stack_base_config_template = """struct config{index}_base : nnet::garnet_config {{"""
garnet_stack_base_config_template += garnet_common_config_template
garnet_stack_base_config_template += """
static const bool is_stack = true;
typedef config{index}_base base_t;
}};
struct config{index} : config{index}_base {{
static const unsigned n_sublayers = {n_sublayers};
template<int L>
struct sublayer_t : config{index}_base {{}};
}};
{sublayer_configs}
"""
garnet_stack_sublayer_config_template = """template<>
struct config{index}::sublayer_t<{il}> : config{index}_base {{
static const unsigned n_in_features = {n_in_features};
static const unsigned n_propagate = {n_propagate};
static const unsigned n_aggregators = {n_aggregators};
static const unsigned n_out_features = {n_out_features};
typedef {input_transform_weights_t} input_transform_weights_t;
typedef {input_transform_biases_t} input_transform_biases_t;
typedef {aggregator_distance_weights_t} aggregator_distance_weights_t;
typedef {aggregator_distance_biases_t} aggregator_distance_biases_t;
typedef {output_transform_biases_t} output_transform_biases_t;
static const input_transform_weights_t (&input_transform_weights)[{input_transform_weights_size}];
static const input_transform_biases_t (&input_transform_biases)[{input_transform_biases_size}];
static const aggregator_distance_weights_t (&aggregator_distance_weights)[{aggregator_distance_weights_size}];
static const aggregator_distance_biases_t (&aggregator_distance_biases)[{aggregator_distance_biases_size}];
static const output_transform_biases_t (&output_transform_biases)[{output_transform_biases_size}];
typedef config{index}::sublayer_t<{next}> next_layer_t;
}};
const config{index}::sublayer_t<{il}>::input_transform_weights_t (&config{index}::sublayer_t<{il}>::input_transform_weights)[{input_transform_weights_size}] = {input_transform_weights};
const config{index}::sublayer_t<{il}>::input_transform_biases_t (&config{index}::sublayer_t<{il}>::input_transform_biases)[{input_transform_biases_size}] = {input_transform_biases};
const config{index}::sublayer_t<{il}>::aggregator_distance_weights_t (&config{index}::sublayer_t<{il}>::aggregator_distance_weights)[{aggregator_distance_weights_size}] = {aggregator_distance_weights};
const config{index}::sublayer_t<{il}>::aggregator_distance_biases_t (&config{index}::sublayer_t<{il}>::aggregator_distance_biases)[{aggregator_distance_biases_size}] = {aggregator_distance_biases};
const config{index}::sublayer_t<{il}>::output_transform_biases_t (&config{index}::sublayer_t<{il}>::output_transform_biases)[{output_transform_biases_size}] = {output_transform_biases};
"""
garnet_stack_config_template = (garnet_stack_base_config_template, garnet_stack_sublayer_config_template)
garnet_stack_function_template = 'nnet::garnet_stack<{input_t}, {integer_input_t}, {output_t}, {config}>({input}, {nvtx}, {output});'
class GarNetStackConfigTemplate(GarNetConfigTemplate):
def __init__(self):
super(GarNetConfigTemplate, self).__init__(GarNetStack)
self.template = garnet_stack_config_template
def get_transforms_config(self, node, params):
_, sublayer_template = self.template
params['n_sublayers'] = node.attributes['n_sublayers']
params['n_in_features'] = node.attributes['n_in_features'][0]
params['n_out_features'] = node.attributes['n_out_features'][-1]
sublayer_configs = []
for il in range(node.attributes['n_sublayers'] - 1, -1, -1):
sub_params = {'index': node.index, 'il': il}
for p in ['n_in_features', 'n_propagate', 'n_aggregators', 'n_out_features']:
sub_params[p] = node.attributes[p][il]
for wname, weights in node._sublayer_weights[il].items():
sub_params[wname] = weights.name
sub_params['{}_t'.format(wname)] = weights.type.name
sub_params['{}_size'.format(wname)] = weights.data_length
if il != node.attributes['n_sublayers'] - 1:
sub_params['next'] = il + 1
else:
sub_params['next'] = 0
sublayer_configs.append(sublayer_template.format(**sub_params))
params['sublayer_configs'] = '\n'.join(sublayer_configs)
class GarNetStackFunctionTemplate(GarNetFunctionTemplate):
def __init__(self):
super(GarNetFunctionTemplate, self).__init__(GarNetStack, include_header=garnet_include_list)
self.template = garnet_stack_function_template
|
xception_tf/test_tf_xception.py | rickyHong/Light-Head-RCNN-enhanced-Xdetector | 116 | 12655293 | <gh_stars>100-1000
import numpy as np
import sys
import os
import tensorflow as tf
import tf_xception_
input_placeholder, output = tf_xception_.KitModel('./xception.npy')
for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
print(var.op.name)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
predict = sess.run(output, feed_dict = {input_placeholder : np.ones((1,299,299,3)) * 0.5})
print(predict)
print(np.argmax(predict))
|
datadog_nagios_plugin_wrapper/checks.d/nagios_plugin_wrapper.py | nadaj/Miscellany | 155 | 12655297 | import re
from datadog_checks.base import AgentCheck
from datadog_checks.base.errors import CheckException
from datadog_checks.utils.subprocess_output import get_subprocess_output
__version__ = "1.0.0"
__author__ = "<NAME> <<EMAIL>>"
class NagiosPluginWrapperCheck(AgentCheck):
PERFDATA_RE = (
r"([^\s]+|'[^']+')=([-.\d]+)(c|s|ms|us|B|KB|MB|GB|TB|%)?" +
r"(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?")
def check(self, instance):
check_command = instance.get('check_command')
metric_namespace = instance.get('metric_namespace')
tags = instance.get('tags', [])
create_service_check = instance.get('create_service_check', False)
if not check_command:
raise CheckException("Configuration error. Missing check_command definition, please fix nagios_plugin_wrapper.yaml")
if not metric_namespace:
raise CheckException("Configuration error. Missing metric_namespace definition, please fix nagios_plugin_wrapper.yaml")
raw_output = None
err = None
ret = None
try:
raw_output, err, ret = get_subprocess_output(check_command, self.log)
except Exception as e:
error = "Failed to execute check_command {check_command} - {error}".format(
check_command=check_command, error=e)
self.log.warning(error)
raise CheckException("check_command '{check_command}' failed to execute, see agent.log for more information.".format(
check_command=check_command
))
output, metrics = self._parse_output(raw_output)
if metrics:
metrics = self._parse_perfdata(metrics)
for label, value in metrics:
label = self._sanitize(label)
self.log.debug("metric_namespace: {namespace} | tags: {tags} | value: {value} | ret_code: {ret}".format(
namespace=metric_namespace, tags=tags, value=value, ret=ret))
self.gauge('{metric_namespace}.{label}'.format(
metric_namespace=metric_namespace, label=label), value, tags=tags)
if output and create_service_check:
if ret == 0:
status = AgentCheck.OK
elif ret == 1:
status = AgentCheck.WARNING
elif ret == 2:
status = AgentCheck.CRITICAL
else:
status = AgentCheck.UNKNOWN
self.service_check(metric_namespace, status, tags=tags, message=output.rstrip())
def _parse_output(self, s):
"""Parse the output text and performance data string"""
try:
output, metrics = s.rsplit('|', 1)
except ValueError:
self.log.debug("No performance data found in string: {string}, skipping...".format(
string=s))
return s, None
return output, metrics
def _parse_perfdata(self, s):
"""Parse performance data from a perfdata string"""
metrics = []
counters = re.findall(self.PERFDATA_RE, s)
if counters is None:
self.log.warning("Failed to parse performance data: {s}".format(
s=s))
return metrics
for (key, value, uom, warn, crit, min, max) in counters:
try:
norm_value = self._normalize_to_unit(float(value), uom)
metrics.append((key, norm_value))
except ValueError:
self.log.warning(
"Couldn't convert value '{value}' to float".format(
value=value))
return metrics
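    # A hedged example of the perfdata format parsed above (the string is
    # ours): "time=0.012s;1;2;0;10 size=512KB" yields
    # [('time', 0.012), ('size', 524288.0)] -- 'KB' is scaled to bytes by
    # _normalize_to_unit below, while 's' values pass through unchanged.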
def _normalize_to_unit(self, value, unit):
"""Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB).
"""
if unit == 'ms':
return value / 1000.0
if unit == 'us':
return value / 1000000.0
if unit == 'KB':
return value * 1024
if unit == 'MB':
return value * 1024 * 1024
if unit == 'GB':
return value * 1024 * 1024 * 1024
if unit == 'TB':
return value * 1024 * 1024 * 1024 * 1024
return value
def _sanitize(self, s):
"""Sanitize the name of a metric to remove unwanted chars
"""
        return re.sub(r"[^\w-]", "", s)
|
backend/src/publisher/processing/pubsub.py | rutvikpadhiyar000/github-trends | 157 | 12655331 | <gh_stars>100-1000
from typing import Optional
from src.utils.pubsub import publish_to_topic
def publish_user(user_id: str, access_token: Optional[str] = None):
publish_to_topic("user", {"user_id": user_id, "access_token": access_token})
|
mGui/examples/basicList.py | theodox/mGui | 105 | 12655350 | <filename>mGui/examples/basicList.py
import maya.cmds as cmds
import random
from mGui import gui, forms, lists
from mGui.bindings import bind
from mGui.observable import ViewCollection
def basic_list_binding():
'''
Illustrates the basics of binding to a list. The collection 'bound' contains some strings, and we
bind it to the VerticalList 'list_view'.
Adding items to the collection automatically redraws the list with the new items. In this case they are
drawn with buttons, but lists allow you to customize the appearance of items extensively.
This example also illustrates how to use closures to capture inter-object references, and how to keep callback
functions alive without creating a full class.
'''
with gui.BindingWindow(title='example window', menuBar=True) as test_window:
bound = ViewCollection('pPlane1', 'pCube2')
with forms.VerticalThreePane() as main:
header = gui.Text(label="List classes make it easy to manage collections")
list_view = lists.VerticalList(synchronous=True)
bound > bind() > list_view.collection
with forms.HorizontalStretchForm() as buttons:
more = gui.Button(label='Add another')
close = gui.Button(label='close')
# use closures to capture the UI names without a full class
def close_window(*_, **__):
cmds.deleteUI(test_window)
def show_more(*_, **__):
r = random.choice(("pPlane", "pCube", "pSphere")) + str(random.randint(2, 20))
bound.append(r)
# bind the functions to the handlers
close.command += close_window, test_window
more.command += show_more, test_window
return test_window
if __name__ == '__main__':
the_window = basic_list_binding()
the_window.show()
|
example/PUR/trainPUR-Reg.py | csiro-hydroinformatics/hydroDL | 109 | 12655364 | <reponame>csiro-hydroinformatics/hydroDL
import sys
sys.path.append('../')
from hydroDL import master
from hydroDL.master import default
from hydroDL.data import camels
from hydroDL.model import rnn, crit, train
import json
import os
import numpy as np
import torch
import random
# Options for different interface
interfaceOpt = 1
# ==1 default, the improved and more interpretable version. It's easier to see the data flow, model setup and training
# process. Recommended for most users.
# ==0 the original "pro" version we used to run heavy jobs for the paper. It was later improved for clarity to obtain option 1.
# Results are very similar for two options and have little difference in computational performance.
Action = [1, 2]
# Using Action options to control training different models
# 1: Train Base LSTM PUR Models without integrating any soft info
# 2: Train CNN-LSTM to integrate FDCs
# Hyperparameters
EPOCH = 300
BATCH_SIZE=100
RHO=365
HIDDENSIZE=256
saveEPOCH = 10 # save model for every "saveEPOCH" epochs
Ttrain=[19851001, 19951001] # training period
LCrange = [19851001, 19951001]
# Define root directory of database and output
# Modify this based on your own location of CAMELS dataset
# Following the data download instruction in README file, you should organize the folders like
# 'your/path/to/Camels/basin_timeseries_v1p2_metForcing_obsFlow' and 'your/path/to/Camels/camels_attributes_v2.0'
# Then 'rootDatabase' here should be 'your/path/to/Camels'
# You can also define the database directory in hydroDL/__init__.py by modifying pathCamels['DB'] variable
rootDatabase = os.path.join(os.path.sep, 'scratch', 'Camels') # CAMELS dataset root directory
camels.initcamels(rootDatabase) # initialize three camels module-scope variables in camels.py: dirDB, gageDict, statDict
rootOut = os.path.join(os.path.sep, 'data', 'rnnStreamflow') # Model output root directory
# define random seed
# seedid = [159654, 109958, 257886, 142365, 229837, 588859] # six seeds randomly generated using np.random.uniform
seedid = 159654
random.seed(seedid)
torch.manual_seed(seedid)
np.random.seed(seedid)
torch.cuda.manual_seed(seedid)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Fix the seed for training; change it to launch runs with different seeds.
# We use the mean discharge of 6 runs with different seeds to account for randomness.
# directory to save training results
exp_name='PUR'
exp_disp='Testrun'
save_path = os.path.join(exp_name, exp_disp, str(seedid))
# Divide CAMELS dataset into 7 PUR regions
gageinfo = camels.gageDict
hucinfo = gageinfo['huc']
gageid = gageinfo['id']
# get the id list of each region
regionID = list()
regionNum = list()
regionDivide = [ [1,2], [3,6], [4,5,7], [9,10], [8,11,12,13], [14,15,16,18], [17] ] # seven regions
for ii in range(len(regionDivide)):
tempcomb = regionDivide[ii]
tempregid = list()
for ih in tempcomb:
tempid = gageid[hucinfo==ih].tolist()
tempregid = tempregid + tempid
regionID.append(tempregid)
regionNum.append(len(tempregid))
# Only for interfaceOpt=0 using multiple GPUs, not used here
# cid = 0 # starting GPU id
# gnum = 6 # how many GPUs you have
# Region withheld as testing target. Take region 1 as an example.
# Change this to 1,2,..,7 to run models for all 7 PUR regions in CONUS.
testRegion = 1
iexp = testRegion - 1 # index
TestLS = regionID[iexp] # basin ID list for testing, should be withheld for training
TrainLS = list(set(gageid.tolist()) - set(TestLS)) # basin ID for training
gageDic = {'TrainID': TrainLS, 'TestID': TestLS}
# prepare the training dataset
optData = default.optDataCamels
optData = default.update(optData, tRange=Ttrain, subset=TrainLS, lckernel=None, fdcopt=False)
climateList = camels.attrLstSel + ['p_mean','pet_mean','p_seasonality','frac_snow','aridity','high_prec_freq',
'high_prec_dur','low_prec_freq','low_prec_dur']
# climateList = ['slope_mean', 'area_gages2', 'frac_forest', 'soil_porosity', 'max_water_content']
# climateList = []
optData = default.update(optData, varT=camels.forcingLst, varC= climateList)
# varT: forcing used for training varC: attributes used for training
# The above controls what attributes used for training, change varC for input-selection-ensemble
# for 5 attributes model: climateList = ['slope_mean', 'area_gages2', 'frac_forest', 'soil_porosity', 'max_water_content']
# for no-attribute model: varC = []
# the input-selection ensemble represents using the mean prediction of full, 5-attr and no-attr models,
# in total the mean of 3(different attributes)*6(different random seeds) = 18 models
if interfaceOpt == 1:
# read data from CAMELS dataset
df = camels.DataframeCamels(
subset=optData['subset'], tRange=optData['tRange'])
x = df.getDataTs(
varLst=optData['varT'],
doNorm=False,
rmNan=False)
y = df.getDataObs(
doNorm=False,
rmNan=False,
basinnorm=True)
# "basinnorm = True" will call camels.basinNorm() on the original discharge data. This will transform discharge
# from ft3/s to mm/day and then divided by mean precip to be dimensionless. output = discharge/(area*mean_precip)
c = df.getDataConst(
varLst=optData['varC'],
doNorm=False,
rmNan=False)
# process, do normalization and remove nan
series_data = np.concatenate([x, y], axis=2)
seriesvarLst = camels.forcingLst + ['runoff']
# calculate statistics for normalization and save to a dictionary
statDict = camels.getStatDic(attrLst=climateList, attrdata=c, seriesLst=seriesvarLst, seriesdata=series_data)
# normalize
attr_norm = camels.transNormbyDic(c, climateList, statDict, toNorm=True)
attr_norm[np.isnan(attr_norm)] = 0.0
series_norm = camels.transNormbyDic(series_data, seriesvarLst, statDict, toNorm=True)
# prepare the inputs
xTrain = series_norm[:,:,:-1] # forcing, not include obs
xTrain[np.isnan(xTrain)] = 0.0
yTrain = np.expand_dims(series_norm[:,:,-1], 2)
if attr_norm.size == 0: # [], no-attribute case
attrs = None
Nx = xTrain.shape[-1]
else:
# with attributes
attrs=attr_norm
Nx = xTrain.shape[-1] + attrs.shape[-1]
Ny = yTrain.shape[-1]
# define loss function
optLoss = default.optLossRMSE
lossFun = crit.RmseLoss()
# configuration for training
optTrain = default.update(default.optTrainCamels, miniBatch=[BATCH_SIZE, RHO], nEpoch=EPOCH, saveEpoch=saveEPOCH, seed=seedid)
hucdic = 'Reg-'+str(iexp+1)+'-Num'+str(regionNum[iexp])
if 1 in Action:
# Train base LSTM PUR model
out = os.path.join(rootOut, save_path, hucdic,'Reg-85-95-Sub-Full')
# out = os.path.join(rootOut, save_path, hucdic,'Reg-85-95-Sub-5attr')
# out = os.path.join(rootOut, save_path, hucdic,'Reg-85-95-Sub-Noattr')
if not os.path.isdir(out):
os.makedirs(out)
# log training gage information
gageFile = os.path.join(out, 'gage.json')
with open(gageFile, 'w') as fp:
json.dump(gageDic, fp, indent=4)
# define model config
optModel = default.update(default.optLstm, name='hydroDL.model.rnn.CudnnLstmModel', hiddenSize=HIDDENSIZE)
if interfaceOpt == 1:
# define, load and train model
optModel = default.update(optModel, nx=Nx, ny=Ny)
model = rnn.CudnnLstmModel(nx=optModel['nx'], ny=optModel['ny'], hiddenSize=optModel['hiddenSize'])
# Wrap up all the training configurations to one dictionary in order to save into "out" folder
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.writeMasterFile(masterDict)
# log statistics
statFile = os.path.join(out, 'statDict.json')
with open(statFile, 'w') as fp:
json.dump(statDict, fp, indent=4)
# Train the model
trainedModel = train.trainModel(
model,
xTrain,
yTrain,
attrs,
lossFun,
nEpoch=EPOCH,
miniBatch=[BATCH_SIZE, RHO],
saveEpoch=saveEPOCH,
saveFolder=out)
if interfaceOpt == 0:
# Only need to pass the wrapped configuration dict 'masterDict' for training
# nx, ny will be automatically updated later
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.train(masterDict)
## Not used here.
## A potential way to run batch jobs simultaneously in background through multiple GPUs and Linux screens.
## To use this, must manually set the "pathCamels['DB']" in hydroDL/__init__.py as your own root path of CAMELS data.
## Use the following master.runTrain() instead of the above master.train().
# master.runTrain(masterDict, cudaID=cid % gnum, screen='test-'+str(cid))
# cid = cid + 1
if 2 in Action:
# Train CNN-LSTM PUR model to integrate FDCs
# LCrange defines from which period to get synthetic FDC
LCTstr = str(LCrange[0]) + '-' + str(LCrange[1])
out = os.path.join(rootOut, save_path, hucdic, 'Reg-85-95-Sub-Full-FDC' + LCTstr)
# out = os.path.join(rootOut, save_path, hucdic, 'Reg-85-95-Sub-5attr-FDC' + LCTstr)
# out = os.path.join(rootOut, save_path, hucdic, 'Reg-85-95-Sub-Noattr-FDC' + LCTstr)
if not os.path.isdir(out):
os.makedirs(out)
gageFile = os.path.join(out, 'gage.json')
with open(gageFile, 'w') as fp:
json.dump(gageDic, fp, indent=4)
optData = default.update(default.optDataCamels, tRange=Ttrain, subset=TrainLS,
lckernel=LCrange, fdcopt=True)
# define model
convNKS = [(10, 5, 1), (5, 3, 3), (1, 1, 1)]
# CNN parameters for 3 layers: [(Number of kernels 10,5,1), (kernel size 5,3,3), (stride 1,1,1)]
optModel = default.update(default.optCnn1dLstm, name='hydroDL.model.rnn.CNN1dLCmodel',
hiddenSize=HIDDENSIZE, convNKS=convNKS, poolOpt=[2, 2, 1]) # use CNN-LSTM model
if interfaceOpt == 1:
# load data and create synthetic FDCs as inputs
dffdc = camels.DataframeCamels(subset=optData['subset'], tRange=optData['lckernel'])
datatemp = dffdc.getDataObs(
doNorm=False, rmNan=False, basinnorm=True)
# normalize data
dadata = camels.transNormbyDic(datatemp, 'runoff', statDict, toNorm=True)
dadata = np.squeeze(dadata) # dim Nbasin*Nday
fdcdata = master.master.calFDC(dadata)
print('FDC was calculated and used!')
xIn = (xTrain, fdcdata)
# load model
Nobs = xIn[1].shape[-1]
optModel = default.update(optModel, nx=Nx, ny=Ny, nobs=Nobs) # update input dims
convpara = optModel['convNKS']
model = rnn.CNN1dLCmodel(
nx=optModel['nx'],
ny=optModel['ny'],
nobs=optModel['nobs'],
hiddenSize=optModel['hiddenSize'],
nkernel=convpara[0],
kernelSize=convpara[1],
stride=convpara[2],
poolOpt=optModel['poolOpt'])
        print('CNN1d Local calibration Kernel is used!')
# Wrap up all the training configurations to one dictionary in order to save into "out" folder
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.writeMasterFile(masterDict)
# log statistics
statFile = os.path.join(out, 'statDict.json')
with open(statFile, 'w') as fp:
json.dump(statDict, fp, indent=4)
# Train the model
trainedModel = train.trainModel(
model,
xIn, # need to well defined
yTrain,
attrs,
lossFun,
nEpoch=EPOCH,
miniBatch=[BATCH_SIZE, RHO],
saveEpoch=saveEPOCH,
saveFolder=out)
if interfaceOpt == 0:
# Only need to pass the wrapped configuration 'masterDict' for training
# nx, ny, nobs will be automatically updated later
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.train(masterDict) # train model
# master.runTrain(masterDict, cudaID=cid % gnum, screen='test-'+str(cid))
# cid = cid + 1
|
worldengine/simulations/humidity.py | ctittel/worldengine | 946 | 12655369 | from worldengine.simulations.basic import find_threshold_f
import numpy
class HumiditySimulation(object):
@staticmethod
def is_applicable(world):
return world.has_precipitations() and world.has_irrigation() and (
not world.has_humidity())
def execute(self, world, seed):
assert seed is not None
data, quantiles = self._calculate(world)
world.humidity = (data, quantiles)
@staticmethod
def _calculate(world):
humids = world.humids
        precipitationWeight = 1.0
        irrigationWeight = 3.0
        # weighted combination of the precipitation and irrigation layers
        data = (world.layers['precipitation'].data * precipitationWeight -
                world.layers['irrigation'].data * irrigationWeight) / (
                    precipitationWeight + irrigationWeight)
# These were originally evenly spaced at 12.5% each but changing them
# to a bell curve produced better results
ocean = world.layers['ocean'].data
quantiles = {}
quantiles['12'] = find_threshold_f(data, humids[6], ocean)
quantiles['25'] = find_threshold_f(data, humids[5], ocean)
quantiles['37'] = find_threshold_f(data, humids[4], ocean)
quantiles['50'] = find_threshold_f(data, humids[3], ocean)
quantiles['62'] = find_threshold_f(data, humids[2], ocean)
quantiles['75'] = find_threshold_f(data, humids[1], ocean)
quantiles['87'] = find_threshold_f(data, humids[0], ocean)
return data, quantiles
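
# A minimal sketch (not part of the original module) of the weighted blend
# computed in _calculate above, with two small arrays standing in for the
# world's precipitation and irrigation layers.
if __name__ == '__main__':
    precipitation = numpy.array([[0.2, 0.8], [0.5, 0.1]])
    irrigation = numpy.array([[0.1, 0.3], [0.0, 0.4]])
    pw, iw = 1.0, 3.0
    print((precipitation * pw - irrigation * iw) / (pw + iw))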
|
Cython/Build/Tests/TestCyCache.py | smok-serwis/cython | 6,663 | 12655383 | <filename>Cython/Build/Tests/TestCyCache.py<gh_stars>1000+
import difflib
import glob
import gzip
import os
import tempfile
import Cython.Build.Dependencies
import Cython.Utils
from Cython.TestUtils import CythonTest
class TestCyCache(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self.temp_dir = tempfile.mkdtemp(
prefix='cycache-test',
dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None)
self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir)
def cache_files(self, file_glob):
return glob.glob(os.path.join(self.cache_dir, file_glob))
def fresh_cythonize(self, *args, **kwargs):
Cython.Utils.clear_function_caches()
Cython.Build.Dependencies._dep_tree = None # discard method caches
Cython.Build.Dependencies.cythonize(*args, **kwargs)
def test_cycache_switch(self):
content1 = 'value = 1\n'
content2 = 'value = 2\n'
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
with open(a_pyx, 'w') as f:
f.write(content1)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
self.assertEqual(1, len(self.cache_files('a.c*')))
with open(a_c) as f:
a_contents1 = f.read()
os.unlink(a_c)
with open(a_pyx, 'w') as f:
f.write(content2)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
with open(a_c) as f:
a_contents2 = f.read()
os.unlink(a_c)
self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!')
self.assertEqual(2, len(self.cache_files('a.c*')))
with open(a_pyx, 'w') as f:
f.write(content1)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
self.assertEqual(2, len(self.cache_files('a.c*')))
with open(a_c) as f:
a_contents = f.read()
self.assertEqual(
a_contents, a_contents1,
msg='\n'.join(list(difflib.unified_diff(
a_contents.split('\n'), a_contents1.split('\n')))[:10]))
def test_cycache_uses_cache(self):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
with open(a_pyx, 'w') as f:
f.write('pass')
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0])
gzip.GzipFile(a_cache, 'wb').write('fake stuff'.encode('ascii'))
os.unlink(a_c)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
with open(a_c) as f:
a_contents = f.read()
self.assertEqual(a_contents, 'fake stuff',
'Unexpected contents: %s...' % a_contents[:100])
def test_multi_file_output(self):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
a_h = a_pyx[:-4] + '.h'
a_api_h = a_pyx[:-4] + '_api.h'
with open(a_pyx, 'w') as f:
f.write('cdef public api int foo(int x): return x\n')
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
expected = [a_c, a_h, a_api_h]
for output in expected:
self.assertTrue(os.path.exists(output), output)
os.unlink(output)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
for output in expected:
self.assertTrue(os.path.exists(output), output)
def test_options_invalidation(self):
hash_pyx = os.path.join(self.src_dir, 'options.pyx')
hash_c = hash_pyx[:-len('.pyx')] + '.c'
with open(hash_pyx, 'w') as f:
f.write('pass')
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False)
self.assertEqual(1, len(self.cache_files('options.c*')))
os.unlink(hash_c)
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=True)
self.assertEqual(2, len(self.cache_files('options.c*')))
os.unlink(hash_c)
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False)
self.assertEqual(2, len(self.cache_files('options.c*')))
os.unlink(hash_c)
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True)
self.assertEqual(2, len(self.cache_files('options.c*')))
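
# These tests are normally collected by Cython's own test runner; a direct
# invocation sketch (assuming a Cython checkout on the import path) would be:
#   python -m unittest Cython.Build.Tests.TestCyCache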
|
models/ClassicNetwork/blocks/inception_blocks.py | Dou-Yu-xuan/deep-learning-visal | 150 | 12655386 | # -*- coding:UTF-8 -*-
"""
Implementation of Inception blocks with PyTorch
@<NAME> 2020_09_011
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.blocks.conv_bn import BN_Conv2d
class Stem_v4_Res2(nn.Module):
"""
stem block for Inception-v4 and Inception-RestNet-v2
"""
def __init__(self):
super(Stem_v4_Res2, self).__init__()
self.step1 = nn.Sequential(
BN_Conv2d(3, 32, 3, 2, 0, bias=False),
BN_Conv2d(32, 32, 3, 1, 0, bias=False),
BN_Conv2d(32, 64, 3, 1, 1, bias=False)
)
self.step2_pool = nn.MaxPool2d(3, 2, 0)
self.step2_conv = BN_Conv2d(64, 96, 3, 2, 0, bias=False)
self.step3_1 = nn.Sequential(
BN_Conv2d(160, 64, 1, 1, 0, bias=False),
BN_Conv2d(64, 96, 3, 1, 0, bias=False)
)
self.step3_2 = nn.Sequential(
BN_Conv2d(160, 64, 1, 1, 0, bias=False),
BN_Conv2d(64, 64, (7, 1), (1, 1), (3, 0), bias=False),
BN_Conv2d(64, 64, (1, 7), (1, 1), (0, 3), bias=False),
BN_Conv2d(64, 96, 3, 1, 0, bias=False)
)
self.step4_pool = nn.MaxPool2d(3, 2, 0)
self.step4_conv = BN_Conv2d(192, 192, 3, 2, 0, bias=False)
def forward(self, x):
out = self.step1(x)
tmp1 = self.step2_pool(out)
tmp2 = self.step2_conv(out)
out = torch.cat((tmp1, tmp2), 1)
tmp1 = self.step3_1(out)
tmp2 = self.step3_2(out)
out = torch.cat((tmp1, tmp2), 1)
tmp1 = self.step4_pool(out)
tmp2 = self.step4_conv(out)
print(tmp1.shape)
print(tmp2.shape)
out = torch.cat((tmp1, tmp2), 1)
return out
class Stem_Res1(nn.Module):
"""
stem block for Inception-ResNet-v1
"""
def __init__(self):
super(Stem_Res1, self).__init__()
self.stem = nn.Sequential(
BN_Conv2d(3, 32, 3, 2, 0, bias=False),
BN_Conv2d(32, 32, 3, 1, 0, bias=False),
BN_Conv2d(32, 64, 3, 1, 1, bias=False),
nn.MaxPool2d(3, 2, 0),
BN_Conv2d(64, 80, 1, 1, 0, bias=False),
BN_Conv2d(80, 192, 3, 1, 0, bias=False),
BN_Conv2d(192, 256, 3, 2, 0, bias=False)
)
def forward(self, x):
return self.stem(x)
class Inception_A(nn.Module):
"""
Inception-A block for Inception-v4 net
"""
def __init__(self, in_channels, b1, b2, b3_n1, b3_n3, b4_n1, b4_n3):
super(Inception_A, self).__init__()
self.branch1 = nn.Sequential(
nn.AvgPool2d(3, 1, 1),
BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
)
self.branch2 = BN_Conv2d(in_channels, b2, 1, 1, 0, bias=False)
self.branch3 = nn.Sequential(
BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
BN_Conv2d(b3_n1, b3_n3, 3, 1, 1, bias=False)
)
self.branch4 = nn.Sequential(
BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
BN_Conv2d(b4_n1, b4_n3, 3, 1, 1, bias=False),
BN_Conv2d(b4_n3, b4_n3, 3, 1, 1, bias=False)
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out4 = self.branch4(x)
return torch.cat((out1, out2, out3, out4), 1)
class Inception_B(nn.Module):
"""
Inception-B block for Inception-v4 net
"""
def __init__(self, in_channels, b1, b2, b3_n1, b3_n1x7, b3_n7x1, b4_n1, b4_n1x7_1,
b4_n7x1_1, b4_n1x7_2, b4_n7x1_2):
super(Inception_B, self).__init__()
self.branch1 = nn.Sequential(
nn.AvgPool2d(3, 1, 1),
BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
)
self.branch2 = BN_Conv2d(in_channels, b2, 1, 1, 0, bias=False)
self.branch3 = nn.Sequential(
BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
BN_Conv2d(b3_n1, b3_n1x7, (1, 7), (1, 1), (0, 3), bias=False),
BN_Conv2d(b3_n1x7, b3_n7x1, (7, 1), (1, 1), (3, 0), bias=False)
)
self.branch4 = nn.Sequential(
BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
BN_Conv2d(b4_n1, b4_n1x7_1, (1, 7), (1, 1), (0, 3), bias=False),
BN_Conv2d(b4_n1x7_1, b4_n7x1_1, (7, 1), (1, 1), (3, 0), bias=False),
BN_Conv2d(b4_n7x1_1, b4_n1x7_2, (1, 7), (1, 1), (0, 3), bias=False),
BN_Conv2d(b4_n1x7_2, b4_n7x1_2, (7, 1), (1, 1), (3, 0), bias=False)
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out4 = self.branch4(x)
return torch.cat((out1, out2, out3, out4), 1)
class Inception_C(nn.Module):
"""
Inception-C block for Inception-v4 net
"""
def __init__(self, in_channels, b1, b2, b3_n1, b3_n1x3_3x1, b4_n1,
b4_n1x3, b4_n3x1, b4_n1x3_3x1):
super(Inception_C, self).__init__()
self.branch1 = nn.Sequential(
nn.AvgPool2d(3, 1, 1),
BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
)
self.branch2 = BN_Conv2d(in_channels, b2, 1, 1, 0, bias=False)
self.branch3_1 = BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False)
self.branch3_1x3 = BN_Conv2d(b3_n1, b3_n1x3_3x1, (1, 3), (1, 1), (0, 1), bias=False)
self.branch3_3x1 = BN_Conv2d(b3_n1, b3_n1x3_3x1, (3, 1), (1, 1), (1, 0), bias=False)
self.branch4_1 = nn.Sequential(
BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
BN_Conv2d(b4_n1, b4_n1x3, (1, 3), (1, 1), (0, 1), bias=False),
BN_Conv2d(b4_n1x3, b4_n3x1, (3, 1), (1, 1), (1, 0), bias=False)
)
self.branch4_1x3 = BN_Conv2d(b4_n3x1, b4_n1x3_3x1, (1, 3), (1, 1), (0, 1), bias=False)
self.branch4_3x1 = BN_Conv2d(b4_n3x1, b4_n1x3_3x1, (3, 1), (1, 1), (1, 0), bias=False)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
tmp = self.branch3_1(x)
out3_1 = self.branch3_1x3(tmp)
out3_2 = self.branch3_3x1(tmp)
tmp = self.branch4_1(x)
out4_1 = self.branch4_1x3(tmp)
out4_2 = self.branch4_3x1(tmp)
return torch.cat((out1, out2, out3_1, out3_2, out4_1, out4_2), 1)
class Reduction_A(nn.Module):
"""
Reduction-A block for Inception-v4, Inception-ResNet-v1, Inception-ResNet-v2 nets
"""
def __init__(self, in_channels, k, l, m, n):
super(Reduction_A, self).__init__()
self.branch2 = BN_Conv2d(in_channels, n, 3, 2, 0, bias=False)
self.branch3 = nn.Sequential(
BN_Conv2d(in_channels, k, 1, 1, 0, bias=False),
BN_Conv2d(k, l, 3, 1, 1, bias=False),
BN_Conv2d(l, m, 3, 2, 0, bias=False)
)
def forward(self, x):
out1 = F.max_pool2d(x, 3, 2, 0)
out2 = self.branch2(x)
out3 = self.branch3(x)
return torch.cat((out1, out2, out3), 1)
class Reduction_B_v4(nn.Module):
"""
Reduction-B block for Inception-v4 net
"""
def __init__(self, in_channels, b2_n1, b2_n3, b3_n1, b3_n1x7, b3_n7x1, b3_n3):
super(Reduction_B_v4, self).__init__()
self.branch2 = nn.Sequential(
BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
BN_Conv2d(b2_n1, b2_n3, 3, 2, 0, bias=False)
)
self.branch3 = nn.Sequential(
BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
BN_Conv2d(b3_n1, b3_n1x7, (1, 7), (1, 1), (0, 3), bias=False),
BN_Conv2d(b3_n1x7, b3_n7x1, (7, 1), (1, 1), (3, 0), bias=False),
BN_Conv2d(b3_n7x1, b3_n3, 3, 2, 0, bias=False)
)
def forward(self, x):
out1 = F.max_pool2d(x, 3, 2, 0)
out2 = self.branch2(x)
out3 = self.branch3(x)
return torch.cat((out1, out2, out3), 1)
class Reduction_B_Res(nn.Module):
"""
    Reduction-B block for Inception-ResNet-v1
    and Inception-ResNet-v2 net
"""
def __init__(self, in_channels, b2_n1, b2_n3, b3_n1, b3_n3, b4_n1, b4_n3_1, b4_n3_2):
super(Reduction_B_Res, self).__init__()
self.branch2 = nn.Sequential(
BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
BN_Conv2d(b2_n1, b2_n3, 3, 2, 0, bias=False),
)
self.branch3 = nn.Sequential(
BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
BN_Conv2d(b3_n1, b3_n3, 3, 2, 0, bias=False)
)
self.branch4 = nn.Sequential(
BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
BN_Conv2d(b4_n1, b4_n3_1, 3, 1, 1, bias=False),
BN_Conv2d(b4_n3_1, b4_n3_2, 3, 2, 0, bias=False)
)
def forward(self, x):
out1 = F.max_pool2d(x, 3, 2, 0)
out2 = self.branch2(x)
out3 = self.branch3(x)
out4 = self.branch4(x)
return torch.cat((out1, out2, out3, out4), 1)
class Inception_A_res(nn.Module):
"""
Inception-A block for Inception-ResNet-v1\
and Inception-ResNet-v2 net
"""
def __init__(self, in_channels, b1, b2_n1, b2_n3, b3_n1, b3_n3_1, b3_n3_2, n1_linear):
super(Inception_A_res, self).__init__()
self.branch1 = BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
self.branch2 = nn.Sequential(
BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
BN_Conv2d(b2_n1, b2_n3, 3, 1, 1, bias=False),
)
self.branch3 = nn.Sequential(
BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
BN_Conv2d(b3_n1, b3_n3_1, 3, 1, 1, bias=False),
BN_Conv2d(b3_n3_1, b3_n3_2, 3, 1, 1, bias=False)
)
self.conv_linear = nn.Conv2d(b1 + b2_n3 + b3_n3_2, n1_linear, 1, 1, 0, bias=True)
self.short_cut = nn.Sequential()
if in_channels != n1_linear:
self.short_cut = nn.Sequential(
nn.Conv2d(in_channels, n1_linear, 1, 1, 0, bias=False),
nn.BatchNorm2d(n1_linear)
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out = torch.cat((out1, out2, out3), 1)
out = self.conv_linear(out)
out += self.short_cut(x)
return F.relu(out)
class Inception_B_res(nn.Module):
"""
    Inception-B block for Inception-ResNet-v1\
and Inception-ResNet-v2 net
"""
def __init__(self, in_channels, b1, b2_n1, b2_n1x7, b2_n7x1, n1_linear):
super(Inception_B_res, self).__init__()
self.branch1 = BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
self.branch2 = nn.Sequential(
BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
BN_Conv2d(b2_n1, b2_n1x7, (1, 7), (1, 1), (0, 3), bias=False),
BN_Conv2d(b2_n1x7, b2_n7x1, (7, 1), (1, 1), (3, 0), bias=False)
)
self.conv_linear = nn.Conv2d(b1 + b2_n7x1, n1_linear, 1, 1, 0, bias=False)
self.short_cut = nn.Sequential()
if in_channels != n1_linear:
self.short_cut = nn.Sequential(
nn.Conv2d(in_channels, n1_linear, 1, 1, 0, bias=False),
nn.BatchNorm2d(n1_linear)
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out = torch.cat((out1, out2), 1)
out = self.conv_linear(out)
out += self.short_cut(x)
return F.relu(out)
class Inception_C_res(nn.Module):
"""
Inception-C block for Inception-ResNet-v1\
and Inception-ResNet-v2 net
"""
def __init__(self, in_channels, b1, b2_n1, b2_n1x3, b2_n3x1, n1_linear):
super(Inception_C_res, self).__init__()
self.branch1 = BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
self.branch2 = nn.Sequential(
BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
BN_Conv2d(b2_n1, b2_n1x3, (1, 3), (1, 1), (0, 1), bias=False),
BN_Conv2d(b2_n1x3, b2_n3x1, (3, 1), (1, 1), (1, 0), bias=False)
)
self.conv_linear = nn.Conv2d(b1 + b2_n3x1, n1_linear, 1, 1, 0, bias=False)
self.short_cut = nn.Sequential()
if in_channels != n1_linear:
self.short_cut = nn.Sequential(
nn.Conv2d(in_channels, n1_linear, 1, 1, 0, bias=False),
nn.BatchNorm2d(n1_linear)
)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out = torch.cat((out1, out2), 1)
out = self.conv_linear(out)
out += self.short_cut(x)
return F.relu(out)
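
# A minimal smoke test for these blocks (not part of the original file; the
# 299x299 input size is an assumption taken from the Inception papers).
if __name__ == '__main__':
    x = torch.randn(1, 3, 299, 299)
    stem = Stem_Res1()
    print(stem(x).shape)  # expected: torch.Size([1, 256, 35, 35])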
|
django_email_verification/views.py | eborchert/django-email-validation | 208 | 12655401 | from django.conf import settings
from django.shortcuts import render
from .confirm import verify_token, verify_view
from .errors import NotAllFieldCompiled
@verify_view
def verify(request, token):
try:
template = settings.EMAIL_PAGE_TEMPLATE
if not isinstance(template, str):
raise AttributeError
success, user = verify_token(token)
return render(request, template, {'success': success, 'user': user, 'request': request})
except AttributeError:
raise NotAllFieldCompiled('EMAIL_PAGE_TEMPLATE field not found')
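
# A sketch of how this view is typically wired into a project's urls.py; the
# URL pattern below is an assumption for illustration, not taken from this
# package:
#
# from django.urls import path
# from django_email_verification.views import verify
#
# urlpatterns = [
#     path('email/<str:token>/', verify, name='email-verify'),
# ]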
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CreateAutoProvisioningGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12655410 | <filename>aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CreateAutoProvisioningGroupRequest.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateAutoProvisioningGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateAutoProvisioningGroup','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LaunchConfigurationDataDisks(self): # RepeatList
return self.get_query_params().get('LaunchConfiguration.DataDisk')
def set_LaunchConfigurationDataDisks(self, LaunchConfigurationDataDisk): # RepeatList
for depth1 in range(len(LaunchConfigurationDataDisk)):
if LaunchConfigurationDataDisk[depth1].get('PerformanceLevel') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.PerformanceLevel', LaunchConfigurationDataDisk[depth1].get('PerformanceLevel'))
if LaunchConfigurationDataDisk[depth1].get('KmsKeyId') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.KmsKeyId', LaunchConfigurationDataDisk[depth1].get('KmsKeyId'))
if LaunchConfigurationDataDisk[depth1].get('Description') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.Description', LaunchConfigurationDataDisk[depth1].get('Description'))
if LaunchConfigurationDataDisk[depth1].get('SnapshotId') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.SnapshotId', LaunchConfigurationDataDisk[depth1].get('SnapshotId'))
if LaunchConfigurationDataDisk[depth1].get('Size') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.Size', LaunchConfigurationDataDisk[depth1].get('Size'))
if LaunchConfigurationDataDisk[depth1].get('Device') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.Device', LaunchConfigurationDataDisk[depth1].get('Device'))
if LaunchConfigurationDataDisk[depth1].get('DiskName') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.DiskName', LaunchConfigurationDataDisk[depth1].get('DiskName'))
if LaunchConfigurationDataDisk[depth1].get('Category') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.Category', LaunchConfigurationDataDisk[depth1].get('Category'))
if LaunchConfigurationDataDisk[depth1].get('DeleteWithInstance') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.DeleteWithInstance', LaunchConfigurationDataDisk[depth1].get('DeleteWithInstance'))
if LaunchConfigurationDataDisk[depth1].get('Encrypted') is not None:
self.add_query_param('LaunchConfiguration.DataDisk.' + str(depth1 + 1) + '.Encrypted', LaunchConfigurationDataDisk[depth1].get('Encrypted'))
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_LaunchConfigurationSystemDiskCategory(self): # String
return self.get_query_params().get('LaunchConfiguration.SystemDiskCategory')
def set_LaunchConfigurationSystemDiskCategory(self, LaunchConfigurationSystemDiskCategory): # String
self.add_query_param('LaunchConfiguration.SystemDiskCategory', LaunchConfigurationSystemDiskCategory)
def get_AutoProvisioningGroupType(self): # String
return self.get_query_params().get('AutoProvisioningGroupType')
def set_AutoProvisioningGroupType(self, AutoProvisioningGroupType): # String
self.add_query_param('AutoProvisioningGroupType', AutoProvisioningGroupType)
def get_LaunchConfigurationSystemDiskPerformanceLevel(self): # String
return self.get_query_params().get('LaunchConfiguration.SystemDiskPerformanceLevel')
def set_LaunchConfigurationSystemDiskPerformanceLevel(self, LaunchConfigurationSystemDiskPerformanceLevel): # String
self.add_query_param('LaunchConfiguration.SystemDiskPerformanceLevel', LaunchConfigurationSystemDiskPerformanceLevel)
def get_LaunchConfigurationHostNamess(self): # RepeatList
return self.get_query_params().get('LaunchConfiguration.HostNames')
def set_LaunchConfigurationHostNamess(self, LaunchConfigurationHostNames): # RepeatList
for depth1 in range(len(LaunchConfigurationHostNames)):
self.add_query_param('LaunchConfiguration.HostNames.' + str(depth1 + 1), LaunchConfigurationHostNames[depth1])
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_LaunchConfigurationImageId(self): # String
return self.get_query_params().get('LaunchConfiguration.ImageId')
def set_LaunchConfigurationImageId(self, LaunchConfigurationImageId): # String
self.add_query_param('LaunchConfiguration.ImageId', LaunchConfigurationImageId)
def get_LaunchConfigurationResourceGroupId(self): # String
return self.get_query_params().get('LaunchConfiguration.ResourceGroupId')
def set_LaunchConfigurationResourceGroupId(self, LaunchConfigurationResourceGroupId): # String
self.add_query_param('LaunchConfiguration.ResourceGroupId', LaunchConfigurationResourceGroupId)
def get_PayAsYouGoAllocationStrategy(self): # String
return self.get_query_params().get('PayAsYouGoAllocationStrategy')
def set_PayAsYouGoAllocationStrategy(self, PayAsYouGoAllocationStrategy): # String
self.add_query_param('PayAsYouGoAllocationStrategy', PayAsYouGoAllocationStrategy)
def get_DefaultTargetCapacityType(self): # String
return self.get_query_params().get('DefaultTargetCapacityType')
def set_DefaultTargetCapacityType(self, DefaultTargetCapacityType): # String
self.add_query_param('DefaultTargetCapacityType', DefaultTargetCapacityType)
def get_LaunchConfigurationKeyPairName(self): # String
return self.get_query_params().get('LaunchConfiguration.KeyPairName')
def set_LaunchConfigurationKeyPairName(self, LaunchConfigurationKeyPairName): # String
self.add_query_param('LaunchConfiguration.KeyPairName', LaunchConfigurationKeyPairName)
def get_SystemDiskConfigs(self): # RepeatList
return self.get_query_params().get('SystemDiskConfig')
def set_SystemDiskConfigs(self, SystemDiskConfig): # RepeatList
for depth1 in range(len(SystemDiskConfig)):
if SystemDiskConfig[depth1].get('DiskCategory') is not None:
self.add_query_param('SystemDiskConfig.' + str(depth1 + 1) + '.DiskCategory', SystemDiskConfig[depth1].get('DiskCategory'))
def get_DataDiskConfigs(self): # RepeatList
return self.get_query_params().get('DataDiskConfig')
def set_DataDiskConfigs(self, DataDiskConfig): # RepeatList
for depth1 in range(len(DataDiskConfig)):
if DataDiskConfig[depth1].get('DiskCategory') is not None:
self.add_query_param('DataDiskConfig.' + str(depth1 + 1) + '.DiskCategory', DataDiskConfig[depth1].get('DiskCategory'))
def get_ValidUntil(self): # String
return self.get_query_params().get('ValidUntil')
def set_ValidUntil(self, ValidUntil): # String
self.add_query_param('ValidUntil', ValidUntil)
def get_LaunchTemplateId(self): # String
return self.get_query_params().get('LaunchTemplateId')
def set_LaunchTemplateId(self, LaunchTemplateId): # String
self.add_query_param('LaunchTemplateId', LaunchTemplateId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_LaunchConfigurationSystemDiskSize(self): # Integer
return self.get_query_params().get('LaunchConfiguration.SystemDiskSize')
def set_LaunchConfigurationSystemDiskSize(self, LaunchConfigurationSystemDiskSize): # Integer
self.add_query_param('LaunchConfiguration.SystemDiskSize', LaunchConfigurationSystemDiskSize)
def get_LaunchConfigurationInternetMaxBandwidthOut(self): # Integer
return self.get_query_params().get('LaunchConfiguration.InternetMaxBandwidthOut')
def set_LaunchConfigurationInternetMaxBandwidthOut(self, LaunchConfigurationInternetMaxBandwidthOut): # Integer
self.add_query_param('LaunchConfiguration.InternetMaxBandwidthOut', LaunchConfigurationInternetMaxBandwidthOut)
def get_LaunchConfigurationHostName(self): # String
return self.get_query_params().get('LaunchConfiguration.HostName')
def set_LaunchConfigurationHostName(self, LaunchConfigurationHostName): # String
self.add_query_param('LaunchConfiguration.HostName', LaunchConfigurationHostName)
def get_MinTargetCapacity(self): # String
return self.get_query_params().get('MinTargetCapacity')
def set_MinTargetCapacity(self, MinTargetCapacity): # String
self.add_query_param('MinTargetCapacity', MinTargetCapacity)
def get_MaxSpotPrice(self): # Float
return self.get_query_params().get('MaxSpotPrice')
def set_MaxSpotPrice(self, MaxSpotPrice): # Float
self.add_query_param('MaxSpotPrice', MaxSpotPrice)
def get_LaunchConfigurationPasswordInherit(self): # Boolean
return self.get_query_params().get('LaunchConfiguration.PasswordInherit')
def set_LaunchConfigurationPasswordInherit(self, LaunchConfigurationPasswordInherit): # Boolean
self.add_query_param('LaunchConfiguration.PasswordInherit', LaunchConfigurationPasswordInherit)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_LaunchConfigurationSecurityGroupId(self): # String
return self.get_query_params().get('LaunchConfiguration.SecurityGroupId')
def set_LaunchConfigurationSecurityGroupId(self, LaunchConfigurationSecurityGroupId): # String
self.add_query_param('LaunchConfiguration.SecurityGroupId', LaunchConfigurationSecurityGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_TerminateInstancesWithExpiration(self): # Boolean
return self.get_query_params().get('TerminateInstancesWithExpiration')
def set_TerminateInstancesWithExpiration(self, TerminateInstancesWithExpiration): # Boolean
self.add_query_param('TerminateInstancesWithExpiration', TerminateInstancesWithExpiration)
def get_LaunchConfigurationUserData(self): # String
return self.get_query_params().get('LaunchConfiguration.UserData')
def set_LaunchConfigurationUserData(self, LaunchConfigurationUserData): # String
self.add_query_param('LaunchConfiguration.UserData', LaunchConfigurationUserData)
def get_LaunchConfigurationCreditSpecification(self): # String
return self.get_query_params().get('LaunchConfiguration.CreditSpecification')
def set_LaunchConfigurationCreditSpecification(self, LaunchConfigurationCreditSpecification): # String
self.add_query_param('LaunchConfiguration.CreditSpecification', LaunchConfigurationCreditSpecification)
def get_LaunchConfigurationInstanceName(self): # String
return self.get_query_params().get('LaunchConfiguration.InstanceName')
def set_LaunchConfigurationInstanceName(self, LaunchConfigurationInstanceName): # String
self.add_query_param('LaunchConfiguration.InstanceName', LaunchConfigurationInstanceName)
def get_LaunchConfigurationInstanceDescription(self): # String
return self.get_query_params().get('LaunchConfiguration.InstanceDescription')
def set_LaunchConfigurationInstanceDescription(self, LaunchConfigurationInstanceDescription): # String
self.add_query_param('LaunchConfiguration.InstanceDescription', LaunchConfigurationInstanceDescription)
def get_SpotAllocationStrategy(self): # String
return self.get_query_params().get('SpotAllocationStrategy')
def set_SpotAllocationStrategy(self, SpotAllocationStrategy): # String
self.add_query_param('SpotAllocationStrategy', SpotAllocationStrategy)
def get_TerminateInstances(self): # Boolean
return self.get_query_params().get('TerminateInstances')
def set_TerminateInstances(self, TerminateInstances): # Boolean
self.add_query_param('TerminateInstances', TerminateInstances)
def get_LaunchConfigurationSystemDiskName(self): # String
return self.get_query_params().get('LaunchConfiguration.SystemDiskName')
def set_LaunchConfigurationSystemDiskName(self, LaunchConfigurationSystemDiskName): # String
self.add_query_param('LaunchConfiguration.SystemDiskName', LaunchConfigurationSystemDiskName)
def get_LaunchConfigurationSystemDiskDescription(self): # String
return self.get_query_params().get('LaunchConfiguration.SystemDiskDescription')
def set_LaunchConfigurationSystemDiskDescription(self, LaunchConfigurationSystemDiskDescription): # String
self.add_query_param('LaunchConfiguration.SystemDiskDescription', LaunchConfigurationSystemDiskDescription)
def get_ExcessCapacityTerminationPolicy(self): # String
return self.get_query_params().get('ExcessCapacityTerminationPolicy')
def set_ExcessCapacityTerminationPolicy(self, ExcessCapacityTerminationPolicy): # String
self.add_query_param('ExcessCapacityTerminationPolicy', ExcessCapacityTerminationPolicy)
def get_LaunchTemplateConfigs(self): # RepeatList
return self.get_query_params().get('LaunchTemplateConfig')
def set_LaunchTemplateConfigs(self, LaunchTemplateConfig): # RepeatList
for depth1 in range(len(LaunchTemplateConfig)):
if LaunchTemplateConfig[depth1].get('VSwitchId') is not None:
self.add_query_param('LaunchTemplateConfig.' + str(depth1 + 1) + '.VSwitchId', LaunchTemplateConfig[depth1].get('VSwitchId'))
if LaunchTemplateConfig[depth1].get('MaxPrice') is not None:
self.add_query_param('LaunchTemplateConfig.' + str(depth1 + 1) + '.MaxPrice', LaunchTemplateConfig[depth1].get('MaxPrice'))
if LaunchTemplateConfig[depth1].get('Priority') is not None:
self.add_query_param('LaunchTemplateConfig.' + str(depth1 + 1) + '.Priority', LaunchTemplateConfig[depth1].get('Priority'))
if LaunchTemplateConfig[depth1].get('InstanceType') is not None:
self.add_query_param('LaunchTemplateConfig.' + str(depth1 + 1) + '.InstanceType', LaunchTemplateConfig[depth1].get('InstanceType'))
if LaunchTemplateConfig[depth1].get('WeightedCapacity') is not None:
self.add_query_param('LaunchTemplateConfig.' + str(depth1 + 1) + '.WeightedCapacity', LaunchTemplateConfig[depth1].get('WeightedCapacity'))
def get_LaunchConfigurationRamRoleName(self): # String
return self.get_query_params().get('LaunchConfiguration.RamRoleName')
def set_LaunchConfigurationRamRoleName(self, LaunchConfigurationRamRoleName): # String
self.add_query_param('LaunchConfiguration.RamRoleName', LaunchConfigurationRamRoleName)
def get_LaunchConfigurationInternetMaxBandwidthIn(self): # Integer
return self.get_query_params().get('LaunchConfiguration.InternetMaxBandwidthIn')
def set_LaunchConfigurationInternetMaxBandwidthIn(self, LaunchConfigurationInternetMaxBandwidthIn): # Integer
self.add_query_param('LaunchConfiguration.InternetMaxBandwidthIn', LaunchConfigurationInternetMaxBandwidthIn)
def get_SpotInstanceInterruptionBehavior(self): # String
return self.get_query_params().get('SpotInstanceInterruptionBehavior')
def set_SpotInstanceInterruptionBehavior(self, SpotInstanceInterruptionBehavior): # String
self.add_query_param('SpotInstanceInterruptionBehavior', SpotInstanceInterruptionBehavior)
def get_LaunchConfigurationSecurityEnhancementStrategy(self): # String
return self.get_query_params().get('LaunchConfiguration.SecurityEnhancementStrategy')
def set_LaunchConfigurationSecurityEnhancementStrategy(self, LaunchConfigurationSecurityEnhancementStrategy): # String
self.add_query_param('LaunchConfiguration.SecurityEnhancementStrategy', LaunchConfigurationSecurityEnhancementStrategy)
def get_LaunchConfigurationTags(self): # RepeatList
return self.get_query_params().get('LaunchConfiguration.Tag')
def set_LaunchConfigurationTags(self, LaunchConfigurationTag): # RepeatList
for depth1 in range(len(LaunchConfigurationTag)):
if LaunchConfigurationTag[depth1].get('Key') is not None:
self.add_query_param('LaunchConfiguration.Tag.' + str(depth1 + 1) + '.Key', LaunchConfigurationTag[depth1].get('Key'))
if LaunchConfigurationTag[depth1].get('Value') is not None:
self.add_query_param('LaunchConfiguration.Tag.' + str(depth1 + 1) + '.Value', LaunchConfigurationTag[depth1].get('Value'))
def get_LaunchConfigurationDeploymentSetId(self): # String
return self.get_query_params().get('LaunchConfiguration.DeploymentSetId')
def set_LaunchConfigurationDeploymentSetId(self, LaunchConfigurationDeploymentSetId): # String
self.add_query_param('LaunchConfiguration.DeploymentSetId', LaunchConfigurationDeploymentSetId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SpotInstancePoolsToUseCount(self): # Integer
return self.get_query_params().get('SpotInstancePoolsToUseCount')
def set_SpotInstancePoolsToUseCount(self, SpotInstancePoolsToUseCount): # Integer
self.add_query_param('SpotInstancePoolsToUseCount', SpotInstancePoolsToUseCount)
def get_LaunchConfigurationInternetChargeType(self): # String
return self.get_query_params().get('LaunchConfiguration.InternetChargeType')
def set_LaunchConfigurationInternetChargeType(self, LaunchConfigurationInternetChargeType): # String
self.add_query_param('LaunchConfiguration.InternetChargeType', LaunchConfigurationInternetChargeType)
def get_LaunchTemplateVersion(self): # String
return self.get_query_params().get('LaunchTemplateVersion')
def set_LaunchTemplateVersion(self, LaunchTemplateVersion): # String
self.add_query_param('LaunchTemplateVersion', LaunchTemplateVersion)
def get_LaunchConfigurationIoOptimized(self): # String
return self.get_query_params().get('LaunchConfiguration.IoOptimized')
def set_LaunchConfigurationIoOptimized(self, LaunchConfigurationIoOptimized): # String
self.add_query_param('LaunchConfiguration.IoOptimized', LaunchConfigurationIoOptimized)
def get_PayAsYouGoTargetCapacity(self): # String
return self.get_query_params().get('PayAsYouGoTargetCapacity')
def set_PayAsYouGoTargetCapacity(self, PayAsYouGoTargetCapacity): # String
self.add_query_param('PayAsYouGoTargetCapacity', PayAsYouGoTargetCapacity)
def get_TotalTargetCapacity(self): # String
return self.get_query_params().get('TotalTargetCapacity')
def set_TotalTargetCapacity(self, TotalTargetCapacity): # String
self.add_query_param('TotalTargetCapacity', TotalTargetCapacity)
def get_SpotTargetCapacity(self): # String
return self.get_query_params().get('SpotTargetCapacity')
def set_SpotTargetCapacity(self, SpotTargetCapacity): # String
self.add_query_param('SpotTargetCapacity', SpotTargetCapacity)
def get_ValidFrom(self): # String
return self.get_query_params().get('ValidFrom')
def set_ValidFrom(self, ValidFrom): # String
self.add_query_param('ValidFrom', ValidFrom)
def get_AutoProvisioningGroupName(self): # String
return self.get_query_params().get('AutoProvisioningGroupName')
def set_AutoProvisioningGroupName(self, AutoProvisioningGroupName): # String
self.add_query_param('AutoProvisioningGroupName', AutoProvisioningGroupName)
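
# Minimal usage sketch (credentials, region and the capacity/template values
# below are placeholders, not part of this request class):
#
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = CreateAutoProvisioningGroupRequest()
# request.set_TotalTargetCapacity('10')
# request.set_LaunchTemplateId('lt-xxxxxxxxxxxx')
# response = client.do_action_with_exception(request)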
|
examples/federated_learning/surface_defect_detection_v2/aggregate.py | hithxh/sedna | 311 | 12655416 | <gh_stars>100-1000
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from interface import fedavg, s3_transmitter, simple_chooser
from interface import Estimator
from sedna.service.server import AggregationServerV2
from sedna.common.config import BaseConfig
def run_server():
estimator = Estimator()
estimator.saved = BaseConfig.model_url
server = AggregationServerV2(
data=None, # mistnet, train, test
estimator=estimator,
aggregation=fedavg,
transmitter=s3_transmitter,
chooser=simple_chooser)
server.start()
if __name__ == '__main__':
run_server()
|
基础教程/A2-神经网络基本原理/第5步 - 非线性分类/src/ch10-NonLinearBinaryClassification/HelperClass2/ClassifierFunction_2_0.py | microsoft/ai-edu | 11,094 | 12655435 | <reponame>microsoft/ai-edu<gh_stars>1000+
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
"""
Version 2.0
"""
import numpy as np
class CClassifier(object):
def forward(self, z):
pass
# identical to sigmoid, but used here as a classification function
class Logistic(CClassifier):
def forward(self, z):
a = 1.0 / (1.0 + np.exp(-z))
return a
class Softmax(CClassifier):
def forward(self, z):
shift_z = z - np.max(z, axis=1, keepdims=True)
exp_z = np.exp(shift_z)
a = exp_z / np.sum(exp_z, axis=1, keepdims=True)
return a
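
# A quick sanity check (not part of the original file): both classifiers map
# raw scores into (0, 1), and each Softmax row sums to 1.
if __name__ == '__main__':
    z = np.array([[1.0, 2.0, 3.0]])
    print(Logistic().forward(z))
    print(Softmax().forward(z))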
|
cookie.py | playplaying/NoXss | 401 | 12655496 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Do some work about cookie"""
import os
import re
import time
from config import COOKIE_DIR
from log import LOGGER
__author__ = 'longwenzhang'
def is_ip(domain):
    # match a full dotted-quad IPv4 address
    if re.search(r'^\d{1,3}(?:\.\d{1,3}){3}$', domain):
        return True
# get cookie for browser
def get_cookies_list(target_domain):
if '.' in target_domain:
cookies_list = []
# if the domain is IP
if is_ip(target_domain):
domain_scope = target_domain
else:
# default
domain_scope = '.' + target_domain.split('.')[-2] + '.' + target_domain.split('.')[-1]
cookie_file_path = os.path.join(COOKIE_DIR, '_'.join([domain_scope, 'cookie']))
if os.path.exists(cookie_file_path):
with open(cookie_file_path, "r")as cookie_file:
cookie_file_list = cookie_file.readlines()
expire = cookie_file_list[2]
# check expire
if int(time.time()) < int(expire):
cookies_text = cookie_file_list[0].strip()
domain = cookie_file_list[1].strip()
new_list = cookies_text.split(';')
for i in new_list:
if i != '':
cookie_dict = {}
key = i.split('=')[0].strip()
value = i.split('=')[1].strip()
cookie_dict['domain'] = domain
cookie_dict['name'] = key
cookie_dict['value'] = value
cookie_dict['path'] = '/'
cookies_list.append(cookie_dict)
return cookies_list
# save cookie; default expiry is 3600 s
def save_cookie(cookie,domain,expire_time=3600):
domain_scope='.'+domain.split('.')[-2]+'.'+domain.split('.')[-1]
expire=int(time.time())+expire_time
with open(os.path.join(COOKIE_DIR,'_'.join([domain_scope,'cookie'])), 'w+')as cookie_file:
cookie_file.write(cookie + '\n')
cookie_file.write(domain_scope+'\n')
cookie_file.write(str(expire))
# save cookie for http://ip/path
def save_cookie_ip(cookie,ip,expire_time=3600):
domain_scope=ip
expire=int(time.time())+expire_time
with open(os.path.join(COOKIE_DIR,'_'.join([domain_scope,'cookie'])), 'w+')as cookie_file:
cookie_file.write(cookie + '\n')
cookie_file.write(domain_scope+'\n')
cookie_file.write(str(expire))
# get cookie
def get_cookie(target_domain,):
if '.' in target_domain:
domain_scope = '.' + target_domain.split('.')[-2] + '.' + target_domain.split('.')[-1]
cookie_file_path = os.path.join(COOKIE_DIR, '_'.join([domain_scope, 'cookie']))
if os.path.exists(cookie_file_path):
with open(cookie_file_path, "r")as cookie_file:
cookie_file_list = cookie_file.readlines()
expire = cookie_file_list[2]
# check expire
if int(time.time()) < int(expire):
cookies_text = cookie_file_list[0].strip()
return cookies_text
else:
LOGGER.warn('Cookie of %s is expired!!!' % domain_scope)
# cookie not exists
else:
pass
# get cookie-ip
def get_cookie_ip(ip,):
domain_scope = ip
cookie_file_path = os.path.join(COOKIE_DIR, '_'.join([domain_scope, 'cookie']))
if os.path.exists(cookie_file_path):
with open(cookie_file_path, "r")as cookie_file:
cookie_file_list = cookie_file.readlines()
expire = cookie_file_list[2]
# check expire
if int(time.time()) < int(expire):
cookies_text = cookie_file_list[0].strip()
return cookies_text
else:
LOGGER.warn('Cookie of %s is expired!!!' % domain_scope)
else:
pass
def try_cookie(domain):
# try to find cookie from cookie/ and add it to DEFAULT_HEADER
cookie = get_cookie(domain)
if cookie:
        choose = raw_input('\033[1;32m{}\033[0m'.format("Cookie of %s is found in ./cookie/, do you want to use it? (y/n)" % domain))
if choose == 'y' or choose == 'yes' or choose=='':
return cookie
if __name__=='__main__':
pass
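    # Example usage (a sketch; the domain and cookie string are invented):
    # save_cookie('sessionid=abc123;', 'www.example.com')
    # print(get_cookie('www.example.com'))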
|
generator/tests/documenting_language_model_test.py | romulobusatto/google-api-php-client-services | 709 | 12655534 | #!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for language_model.DocumentingLanguageModel."""
__author__ = '<EMAIL> (<NAME>)'
from google.apputils import basetest
from googleapis.codegen import language_model
class DocumentingLanguageModelTest(basetest.TestCase):
def testDocumentingLanguageModel(self):
dlm = language_model.DocumentingLanguageModel()
self.assertEquals('Array<foo>', dlm.ArrayOf(None, 'foo'))
self.assertEquals('Map<string, foo>', dlm.MapOf(None, 'foo'))
self.assertEquals('foo', dlm.GetCodeTypeFromDictionary({'type': 'foo'}))
self.assertEquals('foo (int)', dlm.GetCodeTypeFromDictionary({
'type': 'foo', 'format': 'int'}))
if __name__ == '__main__':
basetest.main()
|
config_utils.py | snakeztc/NeuralDialog-CVAE | 329 | 12655543 | # Copyright (C) 2017 <NAME>, Carnegie Mellon University
class KgCVAEConfig(object):
    description = None
    use_hcf = True  # use dialog act in training (if turned off, kgCVAE -> CVAE)
    update_limit = 3000  # number of mini-batches between model evaluations
# how to encode utterance.
# bow: add word embedding together
# rnn: RNN utterance encoder
# bi_rnn: bi_directional RNN utterance encoder
sent_type = "bi_rnn"
# latent variable (gaussian variable)
latent_size = 200 # the dimension of latent variable
    full_kl_step = 10000  # how many batches before the KL cost weight reaches 1.0
    dec_keep_prob = 1.0  # whether to use the word-drop decoder [Bowman et al. 2015]
# Network general
cell_type = "gru" # gru or lstm
embed_size = 200 # word embedding size
topic_embed_size = 30 # topic embedding size
da_embed_size = 30 # dialog act embedding size
cxt_cell_size = 600 # context encoder hidden size
sent_cell_size = 300 # utterance encoder hidden size
dec_cell_size = 400 # response decoder hidden size
    backward_size = 10  # how many utterances are kept in the context window
step_size = 1 # internal usage
max_utt_len = 40 # max number of words in an utterance
num_layer = 1 # number of context RNN layers
# Optimization parameters
op = "adam"
grad_clip = 5.0 # gradient abs max cut
init_w = 0.08 # uniform random from [-init_w, init_w]
batch_size = 30 # mini-batch size
init_lr = 0.001 # initial learning rate
lr_hold = 1 # only used by SGD
lr_decay = 0.6 # only used by SGD
keep_prob = 1.0 # drop out rate
improve_threshold = 0.996 # for early stopping
patient_increase = 2.0 # for early stopping
early_stop = True
max_epoch = 60 # max number of epoch of training
grad_noise = 0.0 # inject gradient noise?
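
# A minimal usage sketch (not part of the original file): training code
# typically reads hyperparameters straight off this class.
if __name__ == '__main__':
    config = KgCVAEConfig()
    print(config.sent_type, config.latent_size, config.batch_size)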
|
tools/c7n_openstack/c7n_openstack/resources/server.py | al3pht/cloud-custodian | 2,415 | 12655544 | from c7n_openstack.query import QueryResourceManager, TypeInfo
from c7n_openstack.provider import resources
from c7n.utils import local_session
from c7n.utils import type_schema
from c7n.filters import Filter
from c7n.filters import AgeFilter
@resources.register('server')
class Server(QueryResourceManager):
class resource_type(TypeInfo):
enum_spec = ('list_servers', None)
id = 'id'
name = 'name'
set_server_metadata = "set_server_metadata"
delete_server_metadata = "delete_server_metadata"
add_server_tag = "add_server_tag"
set_server_tag = "set_server_tag"
delete_server_tag = "delete_server_tag"
default_report_fields = ['id', 'name', 'status', 'tenant_id']
@Server.filter_registry.register('image')
class ImageFilter(Filter):
"""Filters Servers based on their image attributes
:example:
.. code-block:: yaml
policies:
          - name: server-image-filter
            resource: openstack.server
filters:
- type: image
image_name: test-image
"""
schema = type_schema(
'image',
image_name={'type': 'string'},
visibility={'type': 'string'},
status={'type': 'string'})
def process(self, resources, event=None):
results = []
client = local_session(self.manager.session_factory).client()
image_name = self.data.get('image_name', None)
visibility = self.data.get('visibility', None)
status = self.data.get('status', None)
images = client.list_images()
for r in resources:
image = find_object_by_property(images, 'id', r.image.id)
matched = True
if not image:
if status == "absent":
results.append(r)
continue
if image_name is not None and image_name != image.name:
matched = False
if visibility is not None and visibility != image.visibility:
matched = False
if status is not None and status != image.status:
matched = False
if matched:
results.append(r)
return results
@Server.filter_registry.register('flavor')
class FlavorFilter(Filter):
"""Filters Servers based on their flavor attributes
:example:
.. code-block:: yaml
policies:
- name: dns-hostname-enabled
resource: openstack.server
filters:
- type: flavor
flavor_name: m1.tiny
"""
schema = type_schema(
'flavor',
flavor_name={'type': 'string'},
flavor_id={'type': 'string'},
vcpus={'type': 'integer'},
ram={'type': 'integer'},
swap={'type': 'integer'},
disk={'type': 'integer'},
ephemeral={'type': 'integer'},
is_public={'type': 'boolean'},
)
def server_match_flavor(self, server, flavor_name, flavor_id,
vcpus, ram, disk, ephemeral, is_public):
openstack = local_session(self.manager.session_factory).client()
server_flavor_name = server.flavor.original_name
flavor = openstack.get_flavor(server_flavor_name)
if not flavor:
return False
if flavor_name and flavor.name != flavor_name:
return False
if flavor_id and flavor.id != flavor_id:
return False
if vcpus and flavor.vcpus != int(vcpus):
return False
if ram and flavor.ram != int(ram):
return False
if disk and flavor.disk != int(disk):
return False
if ephemeral and flavor.ephemeral != int(ephemeral):
return False
if is_public is not None and flavor.is_public != is_public:
return False
return True
def process(self, resources, event=None):
results = []
flavor_name = self.data.get('flavor_name', None)
flavor_id = self.data.get('flavor_id', None)
vcpus = self.data.get('vcpus', None)
ram = self.data.get('ram', None)
disk = self.data.get('disk', None)
ephemeral = self.data.get('ephemeral', None)
is_public = self.data.get('is_public', None)
for server in resources:
if self.server_match_flavor(server, flavor_name, flavor_id,
vcpus, ram, disk, ephemeral,
is_public):
results.append(server)
return results
@Server.filter_registry.register('age')
class AgeFilter(AgeFilter):
date_attribute = "launched_at"
schema = type_schema(
'age',
op={'$ref': '#/definitions/filters_common/comparison_operators'},
days={'type': 'number'},
hours={'type': 'number'},
minutes={'type': 'number'})
def get_resource_data(self, i):
if i.get("launched_at"):
return i.get("launched_at")
return i.get("created_at")
@Server.filter_registry.register('tags')
class TagsFilter(Filter):
"""Filters Servers based on their tags
:example:
.. code-block:: yaml
policies:
- name: demo
resource: openstack.server
filters:
- type: tags
tags:
- key: a
value: b
"""
tags_definition = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'string'},
'value': {'type': 'string'}
},
'required': ['key', 'value'],
}
}
schema = type_schema(
'tags',
tags=tags_definition,
op={'type': 'string', 'enum': ['any', 'all']},
)
def match_any_tags(self, server, tags):
for t in tags:
str_tag = "%s=%s" % (t.get('key'), t.get('value'))
if str_tag in server.tags:
return True
return False
def match_all_tags(self, server, tags):
for t in tags:
str_tag = "%s=%s" % (t.get('key'), t.get('value'))
if str_tag not in server.tags:
return False
return True
def process(self, resources, event=None):
results = []
tags = self.data.get('tags', [])
op = self.data.get('op', 'all')
match_fn = {
'any': self.match_any_tags,
'all': self.match_all_tags
}
for server in resources:
if match_fn[op](server, tags):
results.append(server)
return results
def find_object_by_property(collection, k, v):
result = []
for d in collection:
if hasattr(d, k):
value = getattr(d, k)
else:
value = d.get(k)
if (v is None and value is None) or value == v:
result.append(d)
if not result:
return None
assert(len(result) == 1)
return result[0]
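
# Sketch of find_object_by_property on hypothetical data (not from the repo):
#   images = [{'id': 'a', 'name': 'img-a'}, {'id': 'b', 'name': 'img-b'}]
#   find_object_by_property(images, 'id', 'b')  # -> {'id': 'b', 'name': 'img-b'}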
|