Dataset schema (one row per source file; ⌀ marks columns that may be null):

- hexsha: string, length 40
- size: int64, 5 to 2.06M
- ext: string, 10 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 3 to 248
- max_stars_repo_name: string, length 5 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: sequence, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 to 248
- max_issues_repo_name: string, length 5 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: sequence, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 to 248
- max_forks_repo_name: string, length 5 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: sequence, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 5 to 2.06M
- avg_line_length: float64, 1 to 1.02M
- max_line_length: int64, 3 to 1.03M
- alphanum_fraction: float64, 0 to 1
- count_classes: int64, 0 to 1.6M
- score_classes: float64, 0 to 1
- count_generators: int64, 0 to 651k
- score_generators: float64, 0 to 1
- count_decorators: int64, 0 to 990k
- score_decorators: float64, 0 to 1
- count_async_functions: int64, 0 to 235k
- score_async_functions: float64, 0 to 1
- count_documentation: int64, 0 to 1.04M
- score_documentation: float64, 0 to 1
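The rows that follow carry these fields pipe-separated, with each file's source code in the content column. As a minimal inspection sketch (the parquet file name and format are assumptions, not something stated by the dump):

import pandas as pd

# Load the dump (hypothetical export of this table) and keep rows with a higher
# documentation score, ordered by star count.
df = pd.read_parquet("python_files.parquet")
top = df[df["score_documentation"] > 0.2].sort_values("max_stars_count", ascending=False)
print(top[["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]].head())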
b95f2f6c2258ef8998ac2a053019013dbf870640 | 2,351 | py | Python | account/views.py | KimSoungRyoul/drf_unitteset_study_project | 9a0d824bdc6343eeba6209299c077a6e9d280516 | ["MIT"] | null | null | null | account/views.py | KimSoungRyoul/drf_unitteset_study_project | 9a0d824bdc6343eeba6209299c077a6e9d280516 | ["MIT"] | null | null | null | account/views.py | KimSoungRyoul/drf_unitteset_study_project | 9a0d824bdc6343eeba6209299c077a6e9d280516 | ["MIT"] | null | null | null |
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins
from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
    operation_description="회원 개인정보 조회 API",  # "Retrieve member personal information" API
    filter_inspectors=[DjangoFilterDescriptionInspector],
))
@method_decorator(name='create', decorator=swagger_auto_schema(
    operation_description="회원 가입 API",  # "Member sign-up" API
))
@method_decorator(name='update', decorator=swagger_auto_schema(
    operation_description="회원 정보 수정 API",  # "Update member information" API
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
    operation_description="회원 탈퇴 API",  # "Member withdrawal (account deletion)" API
))
class CustomerAPIViewSet(mixins.CreateModelMixin,
mixins.DestroyModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset: QuerySet = Customer.objects
permission_classes = (IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
def get_serializer_class(self):
if self.request.method == 'POST':
return SignUpFormSerializer
elif self.request.method == 'GET':
return CustomerInfoSerializer
elif self.request.method == 'PUT':
return SignUpFormSerializer
elif self.request.method == 'DELETE':
return SignUpFormSerializer
    def get_permissions(self):
        # Sign-up (POST) is open to anyone; every other method falls back to the
        # class-level IsAuthenticated permission.
        if self.request.method == 'POST':
            return [AllowAny()]
        return super().get_permissions()
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
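

# Hypothetical usage sketch, not part of account/views.py: exercising the sign-up
# action with DRF's test client. The route "/account/customers/" and the payload
# fields are assumptions; the real project defines its own router and the fields
# of SignUpFormSerializer.
from rest_framework.test import APIClient

def sign_up_example():
    client = APIClient()
    response = client.post("/account/customers/",
                           {"username": "kim", "password": "secret"},
                           format="json")
    assert response.status_code == 201  # create() responds with {'id': ...}
    return response.data["id"]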
| 40.534483 | 103 | 0.722671 | 1,317 | 0.549896 | 0 | 0 | 1,854 | 0.774113 | 0 | 0 | 218 | 0.091023 |
b95fe9aa9fab4f285d9028f8b01c9820d83254e4 | 3,831 | py | Python | src/front-door/azext_front_door/_validators.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/front-door/azext_front_door/_validators.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/front-door/azext_front_door/_validators.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
def get_name_or_id_validator(dest, child_type=None, resource_type='Frontdoors', resource_namespace='Microsoft.Network',
resource_name_dest='front_door_name'):
def _validate_name_or_id(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
subscription_id = get_subscription_id(cmd.cli_ctx)
resource_group = namespace.resource_group_name
names_or_ids = getattr(namespace, dest)
is_list = True
# treat single values as a list, but convert back in the end
if not isinstance(names_or_ids, list):
is_list = False
names_or_ids = [names_or_ids]
if names_or_ids == [None] or not names_or_ids:
return
ids = []
for val in names_or_ids:
id_params = {
'subscription': subscription_id,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'name': getattr(namespace, resource_name_dest) if child_type else val,
'child_type_1': child_type,
'child_name_1': val if child_type else None
}
if not is_valid_resource_id(val):
val = resource_id(**id_params)
ids.append(val)
setattr(namespace, dest, ids if is_list else ids[0])
return _validate_name_or_id
def validate_waf_policy(cmd, namespace):
get_name_or_id_validator(
dest='waf_policy',
resource_type='WebApplicationFirewallPolicy'
)(cmd, namespace)
def validate_keyvault(cmd, namespace):
get_name_or_id_validator(
dest='vault',
resource_type='vaults',
resource_namespace='Microsoft.Keyvault'
)(cmd, namespace)
def validate_load_balancing_settings(cmd, namespace):
get_name_or_id_validator('load_balancing_settings', 'loadBalancingSettings')(cmd, namespace)
def validate_probe_settings(cmd, namespace):
get_name_or_id_validator('probe_settings', 'healthProbeSettings')(cmd, namespace)
def validate_frontend_endpoints(cmd, namespace):
get_name_or_id_validator('frontend_endpoints', 'frontendEndpoints')(cmd, namespace)
def validate_backend_pool(cmd, namespace):
get_name_or_id_validator('backend_pool', 'backendPools')(cmd, namespace)
def validate_rules_engine(cmd, namespace):
get_name_or_id_validator('rules_engine', 'rulesEngines')(cmd, namespace)
# pylint: disable=protected-access
class MatchConditionAction(argparse._AppendAction):
# pylint: disable=no-self-use
def parse_match_condition(self, values):
from azext_front_door.vendored_sdks.models import MatchCondition
if not isinstance(values, list):
values = values.split(' ')
try:
return MatchCondition(
match_variable=values[0],
operator=values[1],
match_value=values[2:]
)
except IndexError:
from knack.util import CLIError
raise CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')
def __call__(self, parser, namespace, values, option_string=None):
match_condition = self.parse_match_condition(values)
super(MatchConditionAction, self).__call__(parser, namespace, match_condition, option_string)
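

# Illustrative sketch, not part of _validators.py: the id_params assembled above are
# exactly what msrestazure's resource_id() needs to turn a bare child-resource name
# into a full ARM resource ID. All concrete values below are made up.
from msrestazure.tools import resource_id

example_id = resource_id(
    subscription='00000000-0000-0000-0000-000000000000',
    resource_group='my-rg',
    namespace='Microsoft.Network',
    type='Frontdoors',
    name='my-frontdoor',
    child_type_1='backendPools',
    child_name_1='pool1',
)
# /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/
#     Microsoft.Network/Frontdoors/my-frontdoor/backendPools/pool1
print(example_id)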
| 35.472222 | 119 | 0.645262 | 871 | 0.227356 | 0 | 0 | 0 | 0 | 0 | 0 | 923 | 0.240929 |
b960f3f5be88ef82754359823e7c6a9b7ed78089 | 7,763 | py | Python | mimesis/data/int/development.py | DevAerial/mimesis | 33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b | ["MIT"] | null | null | null | mimesis/data/int/development.py | DevAerial/mimesis | 33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b | ["MIT"] | 1 | 2022-03-26T07:46:59.000Z | 2022-03-26T07:47:20.000Z | mimesis/data/int/development.py | DevAerial/mimesis | 33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b | ["MIT"] | null | null | null |
"""Provides all the data related to the development."""
LICENSES = [
"Apache License, 2.0 (Apache-2.0)",
"The BSD 3-Clause License",
"The BSD 2-Clause License",
"GNU General Public License (GPL)",
"General Public License (LGPL)",
"MIT License (MIT)",
"Mozilla Public License 2.0 (MPL-2.0)",
"Common Development and Distribution License (CDDL-1.0)",
"Eclipse Public License (EPL-1.0)",
]
PROGRAMMING_LANGS = [
"ASP",
"Assembly",
"AutoIt",
"Awk",
"Bash",
"C",
"C Shell",
"C#",
"C++",
"Caml",
"Ceylon",
"Clojure",
"CoffeeScript",
"Common Lisp",
"D",
"Dart",
"Delphi",
"Dylan",
"ECMAScript",
"Elixir",
"Emacs Lisp",
"Erlang",
"F#",
"Falcon",
"Fortran",
"GNU Octave",
"Go",
"Groovy",
"Haskell",
"haXe",
"Io",
"J#",
"Java",
"JavaScript",
"Julia",
"Kotlin",
"Lisp",
"Lua",
"Mathematica",
"Objective-C",
"OCaml",
"Perl",
"PHP",
"PL-I",
"PL-SQL",
"PowerShell",
"Prolog",
"Python",
"R",
"Racket",
"Ruby",
"Rust",
"Scala",
"Scheme",
"Smalltalk",
"Tcl",
"Tex",
"Transact-SQL",
"TypeScript",
"Z shell",
]
OS = [
"Arch",
"CentOS",
"Debian",
"Fedora",
"FreeBSD",
"Gentoo",
"Kali",
"Lubuntu",
"Manjaro",
"Mint",
"OS X",
"macOS",
"OpenBSD",
"PCLinuxOS",
"Slackware",
"Ubuntu",
"Windows 10",
"Windows 7",
"Windows 8",
"Windows 8.1",
"Zorin",
"elementaryOS",
"macOS",
"openSUSE",
]
FOLDERS = [
"Development",
"Downloads",
"Documents",
"Music",
"Video",
"Work",
"Pictures",
"Desktop",
"Study",
]
PROJECT_NAMES = [
"aardonyx",
"abelisaurus",
"achelousaurus",
"achillobator",
"acrocanthosaurus",
"aegyptosaurus",
"afrovenator",
"agilisaurus",
"alamosaurus",
"albertaceratops",
"albertosaurus",
"alectrosaurus",
"alioramus",
"allosaurus",
"alvarezsaurus",
"amargasaurus",
"ammosaurus",
"ampelosaurus",
"amygdalodon",
"anatotitan",
"anchiceratops",
"anchisaurus",
"ankylosaurus",
"anserimimus",
"antarctopelta",
"antarctosaurus",
"apatosaurus",
"aragosaurus",
"aralosaurus",
"archaeoceratops",
"archaeopteryx",
"archaeornithomimus",
"argentinosaurus",
"arrhinoceratops",
"atlascopcosaurus",
"aucasaurus",
"austrosaurus",
"avaceratops",
"avalonia",
"avimimus",
"azendohsaurus",
"bactrosaurus",
"bagaceratops",
"bambiraptor",
"barapasaurus",
"barosaurus",
"baryonyx",
"becklespinax",
"beipiaosaurus",
"bellusaurus",
"borogovia",
"brachiosaurus",
"brachyceratops",
"bugenasaura",
"buitreraptor",
"camarasaurus",
"camptosaurus",
"carnotaurus",
"caudipteryx",
"cedarpelta",
"centrosaurus",
"ceratosaurus",
"cetiosauriscus",
"cetiosaurus",
"chaoyangsaurus",
"chasmosaurus",
"chialingosaurus",
"chindesaurus",
"chinshakiangosaurus",
"chirostenotes",
"chubutisaurus",
"chungkingosaurus",
"citipati",
"coelophysis",
"coelurus",
"coloradisaurus",
"compsognathus",
"conchoraptor",
"confuciusornis",
"corythosaurus",
"cryolophosaurus",
"dacentrurus",
"daspletosaurus",
"datousaurus",
"deinocheirus",
"deinonychus",
"deltadromeus",
"diceratops",
"dicraeosaurus",
"dilophosaurus",
"diplodocus",
"dracorex",
"dravidosaurus",
"dromaeosaurus",
"dromiceiomimus",
"dryosaurus",
"dryptosaurus",
"dubreuillosaurus",
"edmontonia",
"edmontosaurus",
"einiosaurus",
"elaphrosaurus",
"emausaurus",
"eolambia",
"eoraptor",
"eotyrannus",
"equijubus",
"erketu",
"erlikosaurus",
"euhelopus",
"euoplocephalus",
"europasaurus",
"euskelosaurus",
"eustreptospondylus",
"fukuiraptor",
"fukuisaurus",
"gallimimus",
"gargoyleosaurus",
"garudimimus",
"gasosaurus",
"gasparinisaura",
"gastonia",
"giganotosaurus",
"gilmoreosaurus",
"giraffatitan",
"gobisaurus",
"gorgosaurus",
"goyocephale",
"graciliceratops",
"gryposaurus",
"guaibasaurus",
"guanlong",
"hadrosaurus",
"hagryphus",
"haplocanthosaurus",
"harpymimus",
"herrerasaurus",
"hesperosaurus",
"heterodontosaurus",
"homalocephale",
"huayangosaurus",
"hylaeosaurus",
"hypacrosaurus",
"hypselosaurus",
"hypsilophodon",
"iguanodon",
"indosuchus",
"ingenia",
"irritator",
"isisaurus",
"janenschia",
"jaxartosaurus",
"jingshanosaurus",
"jinzhousaurus",
"jobaria",
"juravenator",
"kentrosaurus",
"khaan",
"kotasaurus",
"kritosaurus",
"lamaceratops",
"lambeosaurus",
"lapparentosaurus",
"leaellynasaura",
"leptoceratops",
"lesothosaurus",
"lexovisaurus",
"liaoceratops",
"liaoxiornis",
"ligabuesaurus",
"liliensternus",
"lophorhothon",
"lophostropheus",
"lufengosaurus",
"lurdusaurus",
"lycorhinus",
"magyarosaurus",
"maiasaura",
"majungatholus",
"malawisaurus",
"mamenchisaurus",
"mapusaurus",
"marshosaurus",
"masiakasaurus",
"massospondylus",
"maxakalisaurus",
"megalosaurus",
"melanorosaurus",
"metriacanthosaurus",
"microceratops",
"micropachycephalosaurus",
"microraptor",
"minmi",
"monolophosaurus",
"mononykus",
"mussaurus",
"muttaburrasaurus",
"nanotyrannus",
"nanshiungosaurus",
"nemegtosaurus",
"neovenator",
"neuquenosaurus",
"nigersaurus",
"nipponosaurus",
"noasaurus",
"nodosaurus",
"nomingia",
"nothronychus",
"nqwebasaurus",
"omeisaurus",
"ornitholestes",
"ornithomimus",
"orodromeus",
"oryctodromeus",
"othnielia",
"ouranosaurus",
"oviraptor",
"rebbachisaurus",
"rhabdodon",
"rhoetosaurus",
"rinchenia",
"riojasaurus",
"rugops",
"saichania",
"saltasaurus",
"saltopus",
"sarcosaurus",
"saurolophus",
"sauropelta",
"saurophaganax",
"saurornithoides",
"scelidosaurus",
"scutellosaurus",
"secernosaurus",
"segisaurus",
"segnosaurus",
"seismosaurus",
"shamosaurus",
"shanag",
"shantungosaurus",
"shunosaurus",
"shuvuuia",
"silvisaurus",
"sinocalliopteryx",
"sinornithosaurus",
"sinosauropteryx",
"sinraptor",
"sinvenator",
"zalmoxes",
"zephyrosaurus",
"zuniceratops",
"byzantine",
"svengali",
"accolade",
"acrimony",
"angst",
"anomaly",
"antidote",
"baroque",
"bona_fide",
"bourgeois",
"bravado",
"brogue",
"brusque",
"cacophony",
"caustic",
"charisma",
"cloying",
"deja-vu",
"dichotomy",
"elan",
"ennui",
"epitome",
"esoteric",
"euphemism",
"faux pas",
"fiasco",
"finagle",
"glib",
"harbinger",
"hedonist",
"heresy",
"idyllic",
"insidious",
"junket",
"kitsch",
"litany",
"lurid",
"malaise",
"malinger",
"mantra",
"maudlin",
"mercenary",
"misnomer",
"nirvana",
"oblivion",
"ogle",
"ostracize",
"panacea",
"paradox",
"peevish",
"propriety",
"revel",
"rhetoric",
"spartan",
"stigma",
"stoic",
"suave",
"sycophant",
"tirade",
"tryst",
"untenable",
"vicarious",
"vile",
"waft",
"zealous",
]
| 17.845977 | 61 | 0.551977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,167 | 0.665593 |
b962302fa813576c8cf57a4deea0db5f25dfb918 | 620 | py | Python | docs/mathparse.py | pcmoritz/flow | bc97132e9e2d05262bb6bbad5bda173fd9f4ae92 | ["MIT"] | 16 | 2018-05-25T06:30:28.000Z | 2020-08-08T00:03:47.000Z | docs/mathparse.py | pcmoritz/flow | bc97132e9e2d05262bb6bbad5bda173fd9f4ae92 | ["MIT"] | 46 | 2018-05-22T21:32:55.000Z | 2019-06-12T13:10:02.000Z | docs/mathparse.py | pcmoritz/flow | bc97132e9e2d05262bb6bbad5bda173fd9f4ae92 | ["MIT"] | 6 | 2018-06-22T14:59:14.000Z | 2019-08-29T06:00:34.000Z |
"""
A preliminary attempt at parsing an RST file's math syntax
in order to make math render as inline rather than display
mode. This doesn't work as of yet but might be useful.
It could, however, be not useful if there's a pandoc option
for converting .md to .rst that makes math inline and not
display. Keeping it around, though.
"""
import re
s = """Define
.. math:: v_{des}
as the desired velocity,
.. math:: 1^k
a vector of ones of length"""
with open('/Users/nishant/Downloads/tutorialtest.rst', 'r') as myfile:
s = myfile.read()
print([elem[11:-2] for elem in re.findall('\n.. math:: *\S*\n\n', s)])
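
# Illustrative check, not part of the original script: the pattern only matches a
# math directive that is followed by a blank line, so sources whose blank lines
# were stripped may yield an empty list.
sample = "Define\n.. math:: v_{des}\n\nas the desired velocity"
matches = re.findall('\n.. math:: *\S*\n\n', sample)
print(matches)                      # ['\n.. math:: v_{des}\n\n']
print([m[11:-2] for m in matches])  # ['v_{des}']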
| 22.962963 | 70 | 0.693548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.812903 |
b96253f9f9bc87e42d80842aebed3aa7dacb859b | 1,994 | py | Python | lib/layout/primitives.py | tailhook/pyzza | 610be6ee4bea9b64f8226faf7338523fdafdf2cf | ["MIT"] | 2 | 2015-08-07T15:39:25.000Z | 2019-03-31T12:45:37.000Z | lib/layout/primitives.py | tailhook/pyzza | 610be6ee4bea9b64f8226faf7338523fdafdf2cf | ["MIT"] | null | null | null | lib/layout/primitives.py | tailhook/pyzza | 610be6ee4bea9b64f8226faf7338523fdafdf2cf | ["MIT"] | null | null | null |
from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement
@package('layout')
class Poly(Shape):
__slots__ = ('fillcolor', 'sequence')
def __init__(self, name, fillcolor, seq, states):
super().__init__(name, states)
self.fillcolor = fillcolor
self.sequence = seq
def draw(self, w, h):
g = self.graphics
g.clear()
for line in values(self.sequence):
g.beginFill(self.fillcolor)
g.moveTo(int(line[0][0]*w), int(line[0][1]*h))
for idx in range(1, line.length):
g.lineTo(int(line[idx][0]*w), int(line[idx][1]*h))
g.endFill()
@package('layout')
class RoundRect(Shape):
__slots__ = ('fillcolor', 'radius')
def __init__(self, name, fillcolor, radius, states):
super().__init__(name, states)
self.fillcolor = fillcolor
self.radius = radius
def draw(self, width, height):
g = self.graphics
g.clear()
g.beginFill(self.fillcolor)
g.drawRoundRect(0, 0, width, height, self.radius, self.radius)
g.endFill()
@package('layout')
class TextLine(Widget):
__slots__ = ('format', 'text', 'textline')
def __init__(self, format, text, name, states):
self.format = format
self.text = text
super().__init__(name, states)
def draw(self, width, height):
if self.textline:
self.removeChild(self.textline)
tb = TextBlock()
tb.content = TextElement(self.text, self.format)
self.textline = tb.createTextLine(None, width)
self.addChild(self.textline)
@package('layout')
class CenteredLine(TextLine):
def __init__(self, format, text, name, states):
super().__init__(format, text, name, states)
def draw(self, width, height):
super().draw(width, height)
self.textline.x = int((width - self.textline.width)/2)
self.textline.y = int((height - self.textline.height)/2)
| 32.688525 | 70 | 0.609829 | 1,824 | 0.914744 | 0 | 0 | 1,900 | 0.952859 | 0 | 0 | 96 | 0.048144 |
b963a238595dc05d6bc40e6f5888099b52a8fc14 | 20,515 | py | Python | tests/testing_server.py | ImportTaste/WebRequest | 0cc385622624de16ec980e0c12d9080d593cab74 | ["WTFPL"] | null | null | null | tests/testing_server.py | ImportTaste/WebRequest | 0cc385622624de16ec980e0c12d9080d593cab74 | ["WTFPL"] | null | null | null | tests/testing_server.py | ImportTaste/WebRequest | 0cc385622624de16ec980e0c12d9080d593cab74 | ["WTFPL"] | null | null | null |
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False):
# print("Capturing expected headers:")
# print(expected_headers)
	assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" % type(expected_headers)
for key, val in expected_headers.items():
assert isinstance(key, str)
assert isinstance(val, str)
cookie_key = uuid.uuid4().hex
log = logging.getLogger("Main.TestServer")
sucuri_reqs_1 = 0
sucuri_reqs_2 = 0
sucuri_reqs_3 = 0
class MockServerRequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
return
def validate_headers(self):
for key, value in expected_headers.items():
if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding':
# So PhantomJS monkeys with accept-encoding headers
# Just ignore that particular header, I guess.
pass
# Selenium is fucking retarded, and I can't override the user-agent
# and other assorted parameters via their API at all.
elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language':
pass
elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept':
pass
elif not skip_header_checks:
v1 = value.replace(" ", "")
v2 = self.headers[key]
if v2 is None:
v2 = ""
v2 = v2.replace(" ", "")
test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format(
key,
value,
self.headers[key],
{
'is_annoying_pjs' : is_annoying_pjs,
'is_chromium' : is_chromium,
'is_selenium_garbage_chromium' : is_selenium_garbage_chromium,
'skip_header_checks' : skip_header_checks,
},
)
)
def _get_handler(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
# print("Path: ", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
self.validate_headers()
except Exception:
self.send_response(500)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Headers failed validation!")
raise
if self.path == "/":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/favicon.ico":
self.send_response(404)
self.end_headers()
elif self.path == "/raw-txt":
self.send_response(200)
self.send_header('Content-type', "text/plain")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html-decode":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html/real":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Root OK?</body></html>")
elif self.path == "/compressed/deflate":
self.send_response(200)
self.send_header('Content-Encoding', 'deflate')
self.send_header('Content-type', "text/html")
self.end_headers()
inb = b"Root OK?"
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)
t1 = cobj.compress(inb) + cobj.flush()
self.wfile.write(t1)
elif self.path == "/compressed/gzip":
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(gzip.compress(b"Root OK?"))
elif self.path == "/json/invalid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT")
elif self.path == "/json/valid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/json/no-coding":
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/filename/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/path-only-trailing-slash/":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-html-suffix":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='lolercoaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='loler coaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"loler coaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/explicit-html-mime":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/redirect/bad-1":
self.send_response(302)
self.end_headers()
elif self.path == "/redirect/bad-2":
self.send_response(302)
self.send_header('location', "bad-2")
self.end_headers()
elif self.path == "/redirect/bad-3":
self.send_response(302)
self.send_header('location', "gopher://www.google.com")
self.end_headers()
elif self.path == "/redirect/from-1":
self.send_response(302)
self.send_header('location', "to-1")
self.end_headers()
elif self.path == "/redirect/to-1":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-1")
elif self.path == "/redirect/from-2":
self.send_response(302)
self.send_header('uri', "to-2")
self.end_headers()
elif self.path == "/redirect/to-2":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-2")
elif self.path == "/redirect/from-3":
self.send_response(302)
newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1])
self.send_header('uri', newurl)
self.end_headers()
elif self.path == "/password/expect":
# print("Password")
# print(self.headers)
self.send_response(200)
self.end_headers()
if not 'Authorization' in self.headers:
self.wfile.write(b"Password not sent!!")
return
val = self.headers['Authorization']
passval = val.split(" ")[-1]
passstr = base64.b64decode(passval)
if passstr == b'lol:wat':
self.wfile.write(b"Password Ok?")
else:
self.wfile.write(b"Password Bad!")
elif self.path == "/content/have-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>")
elif self.path == "/content/no-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
##################################################################################################################################
# Cookie stuff
##################################################################################################################################
elif self.path == '/cookie_test':
cook = cookies.SimpleCookie()
cook['cookie_test_key'] = cookie_key
cook['cookie_test_key']['path'] = "/"
cook['cookie_test_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())
self.end_headers()
self.wfile.write(b"<html><body>CF Cookie Test</body></html>")
elif self.path == '/cookie_require':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cookie_test_key' and cook_value == cookie_key:
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>")
return
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie is missing</body></html>")
##################################################################################################################################
# Sucuri validation
##################################################################################################################################
elif self.path == '/sucuri_shit_3':
# I'd like to get this down to just 2 requests (cookie bounce, and fetch).
# Doing that requires pulling html content out of chromium, though.
# Annoying.
nonlocal sucuri_reqs_3
sucuri_reqs_3 += 1
if sucuri_reqs_3 > 3:
raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" % sucuri_reqs_3)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit_2':
# This particular path is the one we should already have a cookie for.
# As such, we expect one request only
nonlocal sucuri_reqs_2
sucuri_reqs_2 += 1
if sucuri_reqs_2 > 1:
raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit':
nonlocal sucuri_reqs_1
sucuri_reqs_1 += 1
if sucuri_reqs_1 > 4:
raise RuntimeError("Too many requests to sucuri_shit (%s)!" % sucuri_reqs_1)
# print("Fetch for ", self.path)
# print("Cookies:", self.headers.get_all('Cookie', failobj=[]))
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
##################################################################################################################################
# Cloudflare validation
##################################################################################################################################
elif self.path == '/cloudflare_under_attack_shit_2':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cloudflare_under_attack_shit':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594':
cook = cookies.SimpleCookie()
cook['cloudflare_validate_key'] = cookie_key
cook['cloudflare_validate_key']['path'] = "/"
cook['cloudflare_validate_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())
self.end_headers()
body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>"
self.wfile.write(body.encode("utf-8"))
##################################################################################################################################
# Handle requests for an unknown path
##################################################################################################################################
else:
test_context.assertEqual(self.path, "This shouldn't happen!")
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
log.info("Request for URL path: '%s'", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
return self._get_handler()
except Exception as e:
log.error("Exception in handler!")
for line in traceback.format_exc().split("\n"):
log.error(line)
raise e
return MockServerRequestHandler
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_server(assertion_class,
from_wg,
port_override = None,
is_chromium = None,
is_selenium_garbage_chromium = False,
is_annoying_pjs = False,
skip_header_checks = False
):
# Configure mock server.
if port_override:
mock_server_port = port_override
else:
mock_server_port = get_free_port()
expected_headers = dict(from_wg.browserHeaders)
print(from_wg)
print(expected_headers)
assert isinstance(expected_headers, dict)
captured_server = capture_expected_headers(
expected_headers = expected_headers,
test_context = assertion_class,
is_chromium = is_chromium,
is_selenium_garbage_chromium = is_selenium_garbage_chromium,
is_annoying_pjs = is_annoying_pjs,
skip_header_checks = skip_header_checks
)
retries = 4
for x in range(retries + 1):
try:
mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)
break
except OSError:
time.sleep(0.2)
if x >= retries:
raise
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
return mock_server_port, mock_server, mock_server_thread
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
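
# Hypothetical usage sketch, not part of the original helpers: start the mock
# server with header checks disabled and fetch one of its fixed routes with the
# standard library.
def example_fetch_from_mock_server():
	import urllib.request
	wg = WebRequest.WebGetRobust()
	port, server, thread = start_server(assertion_class=None, from_wg=wg, skip_header_checks=True)
	try:
		with urllib.request.urlopen("http://localhost:{}/raw-txt".format(port)) as resp:
			return resp.read()  # b'Root OK?'
	finally:
		server.shutdown()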
| 32.929374 | 165 | 0.640653 | 17,801 | 0.867707 | 0 | 0 | 0 | 0 | 0 | 0 | 7,644 | 0.372605 |
b963e6196b8baa521ce89adb40142bf81a9183a6 | 3,770 | py | Python | calcgrades.py | qrowsxi/calcgrades | 93c71c1afef8dde5174726ae1702b71ccba633de | ["MIT"] | null | null | null | calcgrades.py | qrowsxi/calcgrades | 93c71c1afef8dde5174726ae1702b71ccba633de | ["MIT"] | null | null | null | calcgrades.py | qrowsxi/calcgrades | 93c71c1afef8dde5174726ae1702b71ccba633de | ["MIT"] | null | null | null |
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse
def ineq_constraint_1(v):
return np.array([vi for vi in v])
def ineq_constraint_2(v):
return np.array([-vi + 30 for vi in v])
class WeightAverage:
def __init__(self, mean, csv):
self.df = pandas.read_csv(csv)
self.course = self.df['name']
self.expected_mean = mean
self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])
self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]
self.tot_credits = sum(self.owned_credits) + sum(self.credits)
def weight_average(self, v):
term1 = 0
term2 = 0
for i in range(0, len(self.owned_grades)):
term1 = term1 + self.owned_grades[i] * self.owned_credits[i]
for i in range(0, len(v)):
term2 = term2 + v[i] * self.credits[i]
return (term1 + term2) / self.tot_credits
def eq_constraint(self, v):
return self.weight_average(v) - self.expected_mean
def solve(self):
cons = (
{'type': 'eq', 'fun': self.eq_constraint},
{'type': 'ineq', 'fun': ineq_constraint_1},
{'type': 'ineq', 'fun': ineq_constraint_2})
res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)
if not res.success:
return None
return res.x
def error_no_solution():
print("Mean not possible with current vote :(")
exit(0)
def output_result(solver, sol):
avg = solver.weight_average(sol)
df = solver.df
print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
if sol is None:
print("Not Possible with current grades :(")
exit()
for index, row in df.query('grade > 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
i = 0
for index, row in df.query('grade == 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
i += 1
return 0
def main():
name = "calcGrades"
description = """CalcGrades is an utility which purpose is to compute the minimum
grades required to get a certain weight average of the grades over the credits,
given the desired output and the grades already owned."""
parser = argparse.ArgumentParser(name, description=description)
parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')
parser.add_argument('--file',dest='file', default='courses.csv', type=str,
help='path to the csv file containing the courses (default: courses.csv)')
parser.add_argument('--floor', default=False, action='store_true',
help='apply floor operation instead of round to solution')
parser.add_argument('--ceil', default=False, action='store_true',
help='apply ceil operation instead of round to solution')
args = parser.parse_args()
    mean = args.mean[0]  # argparse nargs='+' yields a list; the solver needs a single target mean
courses = args.file
solver = WeightAverage(mean, courses)
sol = solver.solve()
if sol is None:
error_no_solution()
if args.ceil:
sol = [math.ceil(x) for x in sol]
elif args.floor:
sol = [math.floor(x) for x in sol]
else:
sol = [round(x) for x in sol]
output_result(solver, sol)
return 0
if __name__ == '__main__':
main()
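
# Hypothetical input and run, not part of calcgrades.py. The script reads a CSV with
# columns name, credits, grade, where grade 0 marks an exam still to be taken, and
# solves for the missing grades v_i such that
#   (sum(g_j * c_j) + sum(v_i * c_i)) / (total credits) == expected mean,  0 <= v_i <= 30.
#
# courses.csv:
#   name,credits,grade
#   Algorithms,9,28
#   Databases,6,30
#   Machine Learning,9,0
#   Networks,6,0
#
# python calcgrades.py 28.5 --file courses.csv --ceil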
| 35.566038 | 116 | 0.609284 | 1,464 | 0.388329 | 0 | 0 | 0 | 0 | 0 | 0 | 991 | 0.262865 |
b9652ceb78b45d3bef98c61d48e3cd4630133615 | 19,317 | py | Python | sdk/python/pulumi_google_native/testing/v1/test_matrix.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/testing/v1/test_matrix.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/testing/v1/test_matrix.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TestMatrixArgs', 'TestMatrix']
@pulumi.input_type
class TestMatrixArgs:
def __init__(__self__, *,
environment_matrix: pulumi.Input['EnvironmentMatrixArgs'],
result_storage: pulumi.Input['ResultStorageArgs'],
test_specification: pulumi.Input['TestSpecificationArgs'],
client_info: Optional[pulumi.Input['ClientInfoArgs']] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TestMatrix resource.
:param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on.
:param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written.
:param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test.
:param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test.
:param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
:param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
:param pulumi.Input[str] project: The cloud project that owns the test matrix.
"""
pulumi.set(__self__, "environment_matrix", environment_matrix)
pulumi.set(__self__, "result_storage", result_storage)
pulumi.set(__self__, "test_specification", test_specification)
if client_info is not None:
pulumi.set(__self__, "client_info", client_info)
if fail_fast is not None:
pulumi.set(__self__, "fail_fast", fail_fast)
if flaky_test_attempts is not None:
pulumi.set(__self__, "flaky_test_attempts", flaky_test_attempts)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
@property
@pulumi.getter(name="environmentMatrix")
def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']:
"""
The devices the tests are being executed on.
"""
return pulumi.get(self, "environment_matrix")
@environment_matrix.setter
def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']):
pulumi.set(self, "environment_matrix", value)
@property
@pulumi.getter(name="resultStorage")
def result_storage(self) -> pulumi.Input['ResultStorageArgs']:
"""
Where the results for the matrix are written.
"""
return pulumi.get(self, "result_storage")
@result_storage.setter
def result_storage(self, value: pulumi.Input['ResultStorageArgs']):
pulumi.set(self, "result_storage", value)
@property
@pulumi.getter(name="testSpecification")
def test_specification(self) -> pulumi.Input['TestSpecificationArgs']:
"""
How to run the test.
"""
return pulumi.get(self, "test_specification")
@test_specification.setter
def test_specification(self, value: pulumi.Input['TestSpecificationArgs']):
pulumi.set(self, "test_specification", value)
@property
@pulumi.getter(name="clientInfo")
def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]:
"""
Information about the client which invoked the test.
"""
return pulumi.get(self, "client_info")
@client_info.setter
def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]):
pulumi.set(self, "client_info", value)
@property
@pulumi.getter(name="failFast")
def fail_fast(self) -> Optional[pulumi.Input[bool]]:
"""
If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
"""
return pulumi.get(self, "fail_fast")
@fail_fast.setter
def fail_fast(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "fail_fast", value)
@property
@pulumi.getter(name="flakyTestAttempts")
def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]:
"""
The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
"""
return pulumi.get(self, "flaky_test_attempts")
@flaky_test_attempts.setter
def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "flaky_test_attempts", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The cloud project that owns the test matrix.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
class TestMatrix(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
"""
Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test.
:param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on.
:param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
:param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
:param pulumi.Input[str] project: The cloud project that owns the test matrix.
:param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written.
:param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TestMatrixArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param TestMatrixArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = client_info
if environment_matrix is None and not opts.urn:
raise TypeError("Missing required property 'environment_matrix'")
__props__.__dict__["environment_matrix"] = environment_matrix
__props__.__dict__["fail_fast"] = fail_fast
__props__.__dict__["flaky_test_attempts"] = flaky_test_attempts
__props__.__dict__["project"] = project
__props__.__dict__["request_id"] = request_id
if result_storage is None and not opts.urn:
raise TypeError("Missing required property 'result_storage'")
__props__.__dict__["result_storage"] = result_storage
if test_specification is None and not opts.urn:
raise TypeError("Missing required property 'test_specification'")
__props__.__dict__["test_specification"] = test_specification
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["timestamp"] = None
super(TestMatrix, __self__).__init__(
'google-native:testing/v1:TestMatrix',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix':
"""
Get an existing TestMatrix resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = None
__props__.__dict__["environment_matrix"] = None
__props__.__dict__["fail_fast"] = None
__props__.__dict__["flaky_test_attempts"] = None
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["project"] = None
__props__.__dict__["result_storage"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["test_specification"] = None
__props__.__dict__["timestamp"] = None
return TestMatrix(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clientInfo")
def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']:
"""
Information about the client which invoked the test.
"""
return pulumi.get(self, "client_info")
@property
@pulumi.getter(name="environmentMatrix")
def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']:
"""
The devices the tests are being executed on.
"""
return pulumi.get(self, "environment_matrix")
@property
@pulumi.getter(name="failFast")
def fail_fast(self) -> pulumi.Output[bool]:
"""
If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
"""
return pulumi.get(self, "fail_fast")
@property
@pulumi.getter(name="flakyTestAttempts")
def flaky_test_attempts(self) -> pulumi.Output[int]:
"""
The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
"""
return pulumi.get(self, "flaky_test_attempts")
@property
@pulumi.getter(name="invalidMatrixDetails")
def invalid_matrix_details(self) -> pulumi.Output[str]:
"""
Describes why the matrix is considered invalid. Only useful for matrices in the INVALID state.
"""
return pulumi.get(self, "invalid_matrix_details")
@property
@pulumi.getter(name="outcomeSummary")
def outcome_summary(self) -> pulumi.Output[str]:
"""
Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED.
"""
return pulumi.get(self, "outcome_summary")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The cloud project that owns the test matrix.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="resultStorage")
def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']:
"""
Where the results for the matrix are written.
"""
return pulumi.get(self, "result_storage")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Indicates the current progress of the test matrix.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="testExecutions")
def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]:
"""
The list of test executions that the service creates for this matrix.
"""
return pulumi.get(self, "test_executions")
@property
@pulumi.getter(name="testMatrixId")
def test_matrix_id(self) -> pulumi.Output[str]:
"""
Unique id set by the service.
"""
return pulumi.get(self, "test_matrix_id")
@property
@pulumi.getter(name="testSpecification")
def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']:
"""
How to run the test.
"""
return pulumi.get(self, "test_specification")
@property
@pulumi.getter
def timestamp(self) -> pulumi.Output[str]:
"""
The time this test matrix was initially created.
"""
return pulumi.get(self, "timestamp")
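# Usage sketch (illustrative, not part of the generated provider code): looking up an
# existing matrix with the static get() defined above. The module path assumes the
# published pulumi_google_native SDK layout and the id format is only an example.
#   import pulumi
#   import pulumi_google_native.testing.v1 as testing
#   existing = testing.TestMatrix.get(
#       "imported-matrix", id="projects/my-project/testMatrices/matrix-1234")
#   pulumi.export("outcome", existing.outcome_summary)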
| 50.436031 | 458 | 0.67671 | 18,864 | 0.976549 | 0 | 0 | 15,506 | 0.802713 | 0 | 0 | 9,637 | 0.498887 |
b965c021bcb2dac479172708e85ad9ed89f09ef2 | 5,427 | py | Python | View/View.py | MoriokaReimen/ConfigHeaderGenerator | 73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e | [
"MIT"
] | null | null | null | View/View.py | MoriokaReimen/ConfigHeaderGenerator | 73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e | [
"MIT"
] | null | null | null | View/View.py | MoriokaReimen/ConfigHeaderGenerator | 73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e | [
"MIT"
] | null | null | null | import tkinter as tk
import tkinter.messagebox
from Control import Control
class View:
def __init__(self, control : Control.Control):
self.control = control
# Init Window
self.root = tk.Tk()
self.root.title(u"Header File Generator")
self.root.geometry("700x800")
self.config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_enable = tk.Label(self.config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 2)
for i, config in enumerate(self.control.getConfigs()):
symbol_entry = tk.Entry(self.config_frame, width=20)
symbol_entry.insert(tk.END, config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.config_frame, width=40)
detail_entry.insert(tk.END, config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
bt_enable = tk.Button(self.config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 2)
self.config_frame.pack(side=tk.TOP, anchor=tk.NW)
self.value_config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.value_config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.value_config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_value = tk.Label(self.value_config_frame, width = 10)
lb_value["text"] = "Value"
lb_value.grid(row = 0, column = 2)
lb_enable = tk.Label(self.value_config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 3)
for i, val_config in enumerate(self.control.getValConfigs()):
symbol_entry = tk.Entry(self.value_config_frame, width=20)
symbol_entry.insert(tk.END, val_config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.value_config_frame, width=40)
detail_entry.insert(tk.END, val_config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
value_entry = tk.Entry(self.value_config_frame, width=10)
value_entry.insert(tk.END, val_config.value)
value_entry.config(state = tk.DISABLED)
value_entry.config(disabledforeground = "black", disabledbackground = "white")
value_entry.grid(row= i + 1, column = 2)
bt_enable = tk.Button(self.value_config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 3)
self.value_config_frame.pack(side=tk.TOP, anchor=tk.W)
# Generator Button
self.bt_generate = tk.Button(self.root)
self.bt_generate["text"] = "Generate Header"
self.bt_generate["command"] = self.generateHeader
self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE)
def start(self):
self.root.mainloop()
def generateHeader(self):
self.control.generateHeader()
tk.messagebox.showinfo("Header Generator Info", "Generated:{0}".format(self.control.header_config.path))
def update(self):
pass
def toggle_config_enable(self, id, button : tk.Button):
config = self.control.getConfigs()[id]
config.enable = not config.enable
button["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
button.config(bg=color, activebackground = color)
def toggle_val_config_enable(self, id, button : tk.Button):
val_config = self.control.getValConfigs()[id]
val_config.enable = not val_config.enable
button["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
button.config(bg=color, activebackground = color)
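# Usage sketch (illustrative): wiring the view to a controller. This assumes
# Control.Control() can be constructed standalone and that both packages are importable
# from the project root; adjust the import paths to the actual layout.
#   from Control import Control
#   from View.View import View
#   view = View(Control.Control())
#   view.start()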
| 43.071429 | 112 | 0.629445 | 5,350 | 0.985812 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.085314 |
b9669e29ffa745ca4256305d7461bcbe497cc930 | 1,428 | py | Python | tests/bugs/core_3355_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_3355_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_3355_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_3355
# title:        Wrong comparison of DATE and TIMESTAMP if index is used
# description:
# tracker_id: CORE-3355
# min_versions: ['2.1.5']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """create table tdate (id integer not null primary key, val date);
create index tdateix1 on tdate (val);
commit;
insert into tdate values (0, '1997-12-31');
insert into tdate values (1, '1998-01-01');
insert into tdate values (2, '1998-01-02');
insert into tdate values (3, '1998-01-03');
insert into tdate values (4, '1998-01-04');
insert into tdate values (5, '1998-01-05');
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';
select count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
COUNT
=====================
1
COUNT
=====================
5
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
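# Run sketch (illustrative): firebird-qa tests like this one are collected by pytest
# together with the project's firebird-qa plugin and a configured Firebird 3.0+ server:
#   pytest tests/bugs/core_3355_test.py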
| 25.052632 | 95 | 0.641457 | 0 | 0 | 0 | 0 | 183 | 0.128151 | 0 | 0 | 953 | 0.667367 |
b967ba0197b144171458b230c2dfe31844ba0b72 | 5,231 | py | Python | dags/download_decrypt_transfer_files.py | hms-dbmi/bch-pic-sure-airflow-dags | 0c1e6f07da4e270581942e551ac30284474921d4 | [
"Apache-2.0"
] | null | null | null | dags/download_decrypt_transfer_files.py | hms-dbmi/bch-pic-sure-airflow-dags | 0c1e6f07da4e270581942e551ac30284474921d4 | [
"Apache-2.0"
] | null | null | null | dags/download_decrypt_transfer_files.py | hms-dbmi/bch-pic-sure-airflow-dags | 0c1e6f07da4e270581942e551ac30284474921d4 | [
"Apache-2.0"
] | null | null | null | """
@author: anilkdegala
"""
import os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import date, timedelta, datetime
from collections import OrderedDict
from scripts.dag_pebbles import DagPebbles
from airflow.configuration import conf
from scripts.configurations import *
from airflow.operators.dummy_operator import DummyOperator
default_args = {
"owner": "anilkdegala",
"depends_on_past": True,
"max_active_runs": 1,
"start_date": datetime(2015, 6, 1),
"is_active": True,
"is_paused_upon_creation": False,
}
def begin_pipeline(**kwargs):
print("begin_pipeline:")
files = kwargs['dag_run'].conf.get('files')
download_decrypt_arguments = ''
transfer_arguments_list = []
for f in files:
print("download_decrypt_transfer_files: file: ", f['name'], ', location: ', f['path'])
output = f['name']+','+f['path']+','+f['final_name']
download_decrypt_arguments = download_decrypt_arguments + " " + output
transfer_arguments_list.append(DATA_LOCATION + "/"+f['final_name'])
transfer_arguments = ",".join(transfer_arguments_list)
print("final download_decrypt_arguments: ",download_decrypt_arguments)
print("final transfer_arguments: ",transfer_arguments)
kwargs["ti"].xcom_push(key="download_decrypt_arguments", value=download_decrypt_arguments)
kwargs["ti"].xcom_push(key="transfer_arguments", value=transfer_arguments)
def pipeline_enable_check(**kwargs):
dp = DagPebbles()
if dp.pipeline_enable_check('DATA_LOAD'):
return "pipeline_check_passed"
else:
return "pipeline_check_skipped"
def pipeline_check_passed(**kwargs):
print("pipeline_check_passed:")
def end_pipeline(**kwargs):
print("end_pipeline:")
def pipeline_check_skipped(**kwargs):
print("pipeline_check_skipped:")
def cleanup(**kwargs):
dp = DagPebbles()
print("cleanup")
def notify(**kwargs):
dp = DagPebbles()
print("notify")
def end(**kwargs):
dp = DagPebbles()
print("end")
with DAG( "DOWNLOAD_DECRYPT_TRANSFER",
description="Download, Decrypt, Transfer files (Source: S3, Staging: EC2: Target: RDS Oracle)",
default_args=default_args,
schedule_interval=None,
catchup=False,
orientation="TB",
tags=['Utils'],
dagrun_timeout=timedelta(hours=240)
) as dag:
t_pipeline_begin = PythonOperator(
task_id="begin_pipeline",
python_callable=begin_pipeline,
provide_context=True,
dag=dag,
)
t_check_pipeline = BranchPythonOperator(
task_id="check_pipeline",
python_callable=pipeline_enable_check,
provide_context=True,
dag=dag,
)
t_pipeline_check_passed = PythonOperator(
task_id="pipeline_check_passed",
python_callable=pipeline_check_passed,
provide_context=True,
dag=dag,
)
t_pipeline_check_skipped = PythonOperator(
task_id="pipeline_check_skipped",
python_callable=pipeline_check_skipped,
provide_context=True,
dag=dag,
)
download_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/download_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}}"
t_download_files = BashOperator(
task_id='download_files',
bash_command=download_files_cmd,
dag=dag)
decrypt_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/decrypt_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}} "
t_decrypt_files = BashOperator(
task_id='decrypt_files',
bash_command=decrypt_files_cmd,
dag=dag)
transfer_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/transfer_files_rds.pl "+"{{ ti.xcom_pull(key='transfer_arguments')}} "
t_transfer_files = BashOperator(
task_id='transfer_files',
bash_command=transfer_files_cmd,
dag=dag)
t_end_pipeline = PythonOperator(
task_id="end_pipeline",
python_callable=end_pipeline,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_notify = PythonOperator(
task_id="send_notifications",
python_callable=notify,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_cleanup = PythonOperator(
task_id="cleanup",
python_callable=cleanup,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_end = PythonOperator(
task_id="end",
python_callable=end,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_pipeline_begin >> t_check_pipeline
t_check_pipeline >> t_pipeline_check_skipped >> t_end_pipeline
t_check_pipeline >> t_pipeline_check_passed >> t_download_files >> t_decrypt_files >> t_transfer_files >> t_end_pipeline
t_end_pipeline >> t_cleanup >> t_notify >> t_end
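    # Trigger sketch (illustrative): this DAG expects dag_run.conf to provide a "files"
    # list with name/path/final_name entries; the bucket and file names below are made up.
    #   airflow trigger_dag DOWNLOAD_DECRYPT_TRANSFER --conf '{"files": [{"name":
    #       "study.csv.gpg", "path": "s3://bucket/study.csv.gpg", "final_name": "study.csv"}]}'
    #   (on Airflow 2.x the equivalent command is `airflow dags trigger ... --conf ...`)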
| 30.770588 | 171 | 0.664118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,239 | 0.236857 |
b96834dcae4311b040352e86ae4bdc019619193a | 7,518 | py | Python | keystone-moon/keystone/endpoint_policy/controllers.py | hashnfv/hashnfv-moon | daaba34fa2ed4426bc0fde359e54a5e1b872208c | [
"Apache-2.0"
] | null | null | null | keystone-moon/keystone/endpoint_policy/controllers.py | hashnfv/hashnfv-moon | daaba34fa2ed4426bc0fde359e54a5e1b872208c | [
"Apache-2.0"
] | null | null | null | keystone-moon/keystone/endpoint_policy/controllers.py | hashnfv/hashnfv-moon | daaba34fa2ed4426bc0fde359e54a5e1b872208c | [
"Apache-2.0"
] | 1 | 2021-03-21T11:38:30.000Z | 2021-03-21T11:38:30.000Z | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import controller
from keystone.common import dependency
from keystone import notifications
@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
class EndpointPolicyV3Controller(controller.V3Controller):
collection_name = 'endpoints'
member_name = 'endpoint'
def __init__(self):
super(EndpointPolicyV3Controller, self).__init__()
notifications.register_event_callback(
'deleted', 'endpoint', self._on_endpoint_delete)
notifications.register_event_callback(
'deleted', 'service', self._on_service_delete)
notifications.register_event_callback(
'deleted', 'region', self._on_region_delete)
notifications.register_event_callback(
'deleted', 'policy', self._on_policy_delete)
def _on_endpoint_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_endpoint(
payload['resource_info'])
def _on_service_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_service(
payload['resource_info'])
def _on_region_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_region(
payload['resource_info'])
def _on_policy_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_policy(
payload['resource_info'])
@controller.protected()
def create_policy_association_for_endpoint(self, context,
policy_id, endpoint_id):
"""Create an association between a policy and an endpoint."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_endpoint(endpoint_id)
self.endpoint_policy_api.create_policy_association(
policy_id, endpoint_id=endpoint_id)
@controller.protected()
def check_policy_association_for_endpoint(self, context,
policy_id, endpoint_id):
"""Check an association between a policy and an endpoint."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_endpoint(endpoint_id)
self.endpoint_policy_api.check_policy_association(
policy_id, endpoint_id=endpoint_id)
@controller.protected()
def delete_policy_association_for_endpoint(self, context,
policy_id, endpoint_id):
"""Delete an association between a policy and an endpoint."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_endpoint(endpoint_id)
self.endpoint_policy_api.delete_policy_association(
policy_id, endpoint_id=endpoint_id)
@controller.protected()
def create_policy_association_for_service(self, context,
policy_id, service_id):
"""Create an association between a policy and a service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.endpoint_policy_api.create_policy_association(
policy_id, service_id=service_id)
@controller.protected()
def check_policy_association_for_service(self, context,
policy_id, service_id):
"""Check an association between a policy and a service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.endpoint_policy_api.check_policy_association(
policy_id, service_id=service_id)
@controller.protected()
def delete_policy_association_for_service(self, context,
policy_id, service_id):
"""Delete an association between a policy and a service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.endpoint_policy_api.delete_policy_association(
policy_id, service_id=service_id)
@controller.protected()
def create_policy_association_for_region_and_service(
self, context, policy_id, service_id, region_id):
"""Create an association between a policy and region+service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.catalog_api.get_region(region_id)
self.endpoint_policy_api.create_policy_association(
policy_id, service_id=service_id, region_id=region_id)
@controller.protected()
def check_policy_association_for_region_and_service(
self, context, policy_id, service_id, region_id):
"""Check an association between a policy and region+service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.catalog_api.get_region(region_id)
self.endpoint_policy_api.check_policy_association(
policy_id, service_id=service_id, region_id=region_id)
@controller.protected()
def delete_policy_association_for_region_and_service(
self, context, policy_id, service_id, region_id):
"""Delete an association between a policy and region+service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.catalog_api.get_region(region_id)
self.endpoint_policy_api.delete_policy_association(
policy_id, service_id=service_id, region_id=region_id)
@controller.protected()
def get_policy_for_endpoint(self, context, endpoint_id):
"""Get the effective policy for an endpoint."""
self.catalog_api.get_endpoint(endpoint_id)
ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)
# NOTE(henry-nash): since the collection and member for this class is
# set to endpoints, we have to handle wrapping this policy entity
# ourselves.
self._add_self_referential_link(context, ref)
return {'policy': ref}
# NOTE(henry-nash): As in the catalog controller, we must ensure that the
# legacy_endpoint_id does not escape.
@classmethod
def filter_endpoint(cls, ref):
if 'legacy_endpoint_id' in ref:
ref.pop('legacy_endpoint_id')
return ref
@classmethod
def wrap_member(cls, context, ref):
ref = cls.filter_endpoint(ref)
return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref)
@controller.protected()
def list_endpoints_for_policy(self, context, policy_id):
"""List endpoints with the effective association to a policy."""
self.policy_api.get_policy(policy_id)
refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)
return EndpointPolicyV3Controller.wrap_collection(context, refs)
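# API sketch (orientation only; the routes are wired elsewhere in keystone and the exact
# paths of the OS-ENDPOINT-POLICY extension should be taken from its API reference):
#   PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id}
#       -> create_policy_association_for_endpoint()
#   GET /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy
#       -> get_policy_for_endpoint()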
| 45.017964 | 79 | 0.699654 | 6,754 | 0.898377 | 0 | 0 | 6,827 | 0.908087 | 0 | 0 | 1,723 | 0.229183 |
b96893ff0c22487256e91c812d37a56c2c479eb3 | 11,886 | py | Python | src/nibetaseries/cli/run.py | ipacheco-uy/NiBetaSeries | 3d8716552f22f925524d80af9aace09469c22d4d | [
"MIT"
] | 1 | 2019-10-03T21:20:48.000Z | 2019-10-03T21:20:48.000Z | src/nibetaseries/cli/run.py | ipacheco-uy/NiBetaSeries | 3d8716552f22f925524d80af9aace09469c22d4d | [
"MIT"
] | null | null | null | src/nibetaseries/cli/run.py | ipacheco-uy/NiBetaSeries | 3d8716552f22f925524d80af9aace09469c22d4d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m nibetaseries` python will execute
``__main__.py`` as a script. That means there won't be any
``nibetaseries.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nibetaseries.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
from __future__ import absolute_import
import os
import argparse
from argparse import RawTextHelpFormatter
from glob import glob
from multiprocessing import cpu_count
from nipype import config as ncfg
def get_parser():
"""Build parser object"""
from ..__init__ import __version__
import sys
verstr = 'nibs v{}'.format(__version__)
parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',
formatter_class=RawTextHelpFormatter)
parser.add_argument('bids_dir', help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('derivatives_pipeline', help='The pipeline that contains '
'minimally preprocessed img, brainmask, and confounds.tsv')
parser.add_argument('output_dir', help='The directory where the output directory '
'and files should be stored. If you are running group level analysis '
'this folder should be prepopulated with the results of the'
'participant level analysis.')
parser.add_argument('analysis_level', choices=['participant', 'group'],
help='Level of the analysis that will be performed '
'Multiple participant level analyses can be run independently '
'(in parallel) using the same output_dir')
parser.add_argument('-v', '--version', action='version',
version=verstr)
# Atlas Arguments (Required Options)
atlas_args = parser.add_argument_group('Required Atlas Arguments')
atlas_args.add_argument('-a', '--atlas-img', action='store',
required=('-l' in sys.argv or '--atlas-lut' in sys.argv),
help='input atlas nifti where each voxel within a "region" '
'is labeled with the same integer and there is a unique '
'integer associated with each region of interest.')
atlas_args.add_argument('-l', '--atlas-lut', action='store',
required=('-a' in sys.argv or '--atlas-img' in sys.argv),
help='atlas look up table (tsv) formatted with the columns: '
'index, regions which correspond to the regions in the '
'nifti file specified by --atlas-img.')
# preprocessing options
proc_opts = parser.add_argument_group('Options for processing')
proc_opts.add_argument('--estimator', default='lss',
choices=['lss', 'lsa'],
help='beta series modeling method')
proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,
help='select a smoothing kernel (mm)')
proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,
default=0.0078125, help='high pass filter (Hz)')
proc_opts.add_argument('-c', '--confounds', help='The confound column names '
'that are to be included in nuisance regression. '
'write the confounds you wish to include separated by a space',
nargs="+")
proc_opts.add_argument('--hrf-model', default='glover',
choices=['glover', 'spm', 'fir',
'glover + derivative',
'glover + derivative + dispersion',
'spm + derivative',
'spm + derivative + dispersion'],
help='convolve your regressors '
'with one of the following hemodynamic response functions')
proc_opts.add_argument('--fir-delays', default=None,
nargs='+', type=int, help='FIR delays in volumes',
metavar='VOL')
proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '
'are stored (i.e. non-essential files). '
'This directory can be deleted once you are reasonably '
'certain nibs finished as expected.')
# Image Selection options
image_opts = parser.add_argument_group('Options for selecting images')
parser.add_argument('--participant-label', nargs="+",
help='The label(s) of the participant(s) '
'that should be analyzed. The label '
'corresponds to sub-<participant_label> from the BIDS spec '
'(so it does not include "sub-"). If this parameter is not '
'provided all subjects should be analyzed. Multiple '
'participants can be specified with a space separated list.')
image_opts.add_argument('--session-label', action='store',
default=None, help='select a session to analyze')
image_opts.add_argument('-t', '--task-label', action='store',
default=None, help='select a specific task to be processed')
image_opts.add_argument('--run-label', action='store',
default=None, help='select a run to analyze')
image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',
choices=['MNI152NLin2009cAsym'],
help='select a bold derivative in a specific space to be used')
image_opts.add_argument('--description-label', action='store',
default=None, help='select a bold file with particular '
'`desc` label to process')
image_opts.add_argument('--exclude-description-label', action='store_true',
default=False, help='exclude this `desc` label from nibetaseries')
# performance options
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
# misc options
misc = parser.add_argument_group('misc options')
misc.add_argument('--graph', action='store_true', default=False,
help='generates a graph png of the workflow')
return parser
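# Example invocation (illustrative paths, labels and confound names):
#   nibs /data/bids fmriprep /data/bids/derivatives participant \
#       -a /data/atlas/atlas.nii.gz -l /data/atlas/atlas_lut.tsv \
#       -c white_matter csf --estimator lss --participant-label 01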
def main():
from ..workflows.base import init_nibetaseries_participant_wf
# get commandline options
opts = get_parser().parse_args()
# check inputs
if (opts.hrf_model == 'fir') and (opts.fir_delays is None):
raise ValueError('If the FIR HRF model is selected, '
'FIR delays must be provided.')
# Set up directories
# TODO: set up some sort of versioning system
bids_dir = os.path.abspath(opts.bids_dir)
derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline)
output_dir = os.path.abspath(opts.output_dir)
os.makedirs(output_dir, exist_ok=True)
log_dir = os.path.join(output_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
if opts.work_dir:
work_dir = os.path.abspath(opts.work_dir)
else:
work_dir = os.path.join(os.getcwd(), 'nibetaseries_work')
os.makedirs(work_dir, exist_ok=True)
# only for a subset of subjects
if opts.participant_label:
subject_list = opts.participant_label
# for all subjects
else:
subject_dirs = glob(os.path.join(bids_dir, "sub-*"))
subject_list = [subject_dir.split("-")[-1] for subject_dir in subject_dirs]
# Nipype plugin configuration
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
nthreads = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nthreads is None or opts.nthreads is not None:
nthreads = opts.nthreads
if nthreads is None or nthreads < 1:
nthreads = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nthreads
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {'log_directory': log_dir,
'log_to_file': True},
'execution': {'crashdump_dir': log_dir,
'crashfile_format': 'txt',
'parameterize_dirs': False},
})
# running participant level
if opts.analysis_level == "participant":
nibetaseries_participant_wf = init_nibetaseries_participant_wf(
estimator=opts.estimator,
atlas_img=os.path.abspath(opts.atlas_img),
atlas_lut=os.path.abspath(opts.atlas_lut),
bids_dir=bids_dir,
derivatives_pipeline_dir=derivatives_pipeline_dir,
exclude_description_label=opts.exclude_description_label,
fir_delays=opts.fir_delays,
hrf_model=opts.hrf_model,
high_pass=opts.high_pass,
output_dir=output_dir,
run_label=opts.run_label,
selected_confounds=opts.confounds,
session_label=opts.session_label,
smoothing_kernel=opts.smoothing_kernel,
space_label=opts.space_label,
subject_list=subject_list,
task_label=opts.task_label,
description_label=opts.description_label,
work_dir=work_dir,
)
if opts.graph:
nibetaseries_participant_wf.write_graph(graph2use='colored',
format='svg',
simple_form=True)
try:
nibetaseries_participant_wf.run(**plugin_settings)
except RuntimeError as e:
if "Workflow did not execute cleanly" in str(e):
print("Workflow did not execute cleanly")
else:
raise e
elif opts.analysis_level == "group":
raise NotImplementedError('group analysis not currently implemented')
def init():
if __name__ == "__main__":
raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n"
"Please `pip install` NiBetaSeries and use the `nibs` command")
init()
| 46.611765 | 98 | 0.595406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,930 | 0.414774 |
b9693ae1ef191dd2735a2abba99bb1bc689af26f | 2,727 | py | Python | custom_components/senz/config_flow.py | astrandb/senz_hass | 6725d37fd9c6d250ac10a16e68c56908bf1c8404 | [
"MIT"
] | 2 | 2022-01-15T09:55:58.000Z | 2022-02-10T10:13:35.000Z | custom_components/senz/config_flow.py | astrandb/senz_hass | 6725d37fd9c6d250ac10a16e68c56908bf1c8404 | [
"MIT"
] | 4 | 2022-01-15T19:41:28.000Z | 2022-02-14T16:01:47.000Z | custom_components/senz/config_flow.py | astrandb/senz_hass | 6725d37fd9c6d250ac10a16e68c56908bf1c8404 | [
"MIT"
] | null | null | null | """Config flow for SENZ WiFi."""
from __future__ import annotations
import logging
from typing import Any
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
from .pysenz import PreAPI
class OAuth2FlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle SENZ WiFi OAuth2 authentication."""
DOMAIN = DOMAIN
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {
"scope": "restapi offline_access",
}
async def async_step_reauth(
self, entry: dict[str, Any] | None = None
) -> FlowResult:
"""Perform reauth upon an API authentication error."""
self.entry = entry
persistent_notification.async_create(
self.hass,
f"Senz integration for account {entry['auth_implementation']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it.",
"Senz re-authentication",
"senz_reauth",
)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Dialog that informs the user that reauth is required."""
if user_input is None:
return self.async_show_form(
step_id="reauth_confirm",
description_placeholders={"account": self.entry["auth_implementation"]},
data_schema=vol.Schema({}),
errors={},
)
persistent_notification.async_dismiss(self.hass, "senz_reauth")
return await self.async_step_user()
async def async_oauth_create_entry(self, data: dict) -> dict:
"""Create an oauth config entry or update existing entry for reauth."""
pre_api = PreAPI(self.hass)
resp = await pre_api.getAccount(data["token"]["access_token"])
account = resp["userName"]
existing_entry = await self.async_set_unique_id(account)
if existing_entry:
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(title=account, data=data)
| 34.518987 | 184 | 0.671067 | 2,363 | 0.86652 | 0 | 0 | 321 | 0.117712 | 1,829 | 0.6707 | 708 | 0.259626 |
b9697b05a9b44247d80463465fa92118d707fb98 | 6,465 | py | Python | astropy_helpers/git_helpers.py | bsipocz/astropy-helpers | 4999df1cfb6a5022347b0cef9caf8a556517c625 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 9 | 2019-12-06T13:12:33.000Z | 2021-10-05T12:47:15.000Z | astropy_helpers/git_helpers.py | bsipocz/astropy-helpers | 4999df1cfb6a5022347b0cef9caf8a556517c625 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2019-11-28T17:20:27.000Z | 2019-12-09T18:44:35.000Z | astropy_helpers/git_helpers.py | bsipocz/astropy-helpers | 4999df1cfb6a5022347b0cef9caf8a556517c625 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 | 2019-11-28T17:04:22.000Z | 2021-10-19T13:12:34.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for retrieving revision information from a project's git repository.
"""
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import locale
import os
import subprocess
import warnings
def _decode_stdio(stream):
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
stdio_encoding = 'utf-8'
try:
text = stream.decode(stdio_encoding)
except UnicodeDecodeError:
# Final fallback
text = stream.decode('latin1')
return text
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
# otherwise it's already the true/release version
return version
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If `None`, the current working directory is used, and must
be the root of the git repository.
If given a filename it uses the directory containing that file.
Returns
-------
devversion : str
Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
if path is None:
path = os.getcwd()
if not os.path.isdir(path):
path = os.path.abspath(os.path.dirname(path))
if sha:
# Faster for getting just the hash of HEAD
cmd = ['rev-parse', 'HEAD']
else:
cmd = ['rev-list', '--count', 'HEAD']
def run_git(cmd):
try:
p = subprocess.Popen(['git'] + cmd, cwd=path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError as e:
if show_warning:
warnings.warn('Error running git: ' + str(e))
return (None, b'', b'')
if p.returncode == 128:
if show_warning:
warnings.warn('No git repository present at {0!r}! Using '
'default dev version.'.format(path))
return (p.returncode, b'', b'')
if p.returncode == 129:
if show_warning:
warnings.warn('Your git looks old (does it support {0}?); '
'consider upgrading to v1.7.2 or '
'later.'.format(cmd[0]))
return (p.returncode, stdout, stderr)
elif p.returncode != 0:
if show_warning:
warnings.warn('Git failed while determining revision '
'count: {0}'.format(_decode_stdio(stderr)))
return (p.returncode, stdout, stderr)
return p.returncode, stdout, stderr
returncode, stdout, stderr = run_git(cmd)
if not sha and returncode == 128:
# git returns 128 if the command is not run from within a git
# repository tree. In this case, a warning is produced above but we
# return the default dev version of '0'.
return '0'
elif not sha and returncode == 129:
# git returns 129 if a command option failed to parse; in
# particular this could happen in git versions older than 1.7.2
# where the --count option is not supported
# Also use --abbrev-commit and --abbrev=0 to display the minimum
# number of characters needed per-commit (rather than the full hash)
cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
returncode, stdout, stderr = run_git(cmd)
# Fall back on the old method of getting all revisions and counting
# the lines
if returncode == 0:
return str(stdout.count(b'\n'))
else:
return ''
elif sha:
return _decode_stdio(stdout)[:40]
else:
return _decode_stdio(stdout).strip()
# This function is tested but it is only ever executed within a subprocess when
# creating a fake package, so it doesn't get picked up by coverage metrics.
def _get_repo_path(pathname, levels=None): # pragma: no cover
"""
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
repository and is returned if so.
Returns `None` if the given path could not be determined to belong to a git
repo.
"""
if os.path.isfile(pathname):
current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
current_dir = os.path.abspath(pathname)
else:
return None
current_level = 0
while levels is None or current_level <= levels:
if os.path.exists(os.path.join(current_dir, '.git')):
return current_dir
current_level += 1
if current_dir == os.path.dirname(current_dir):
break
current_dir = os.path.dirname(current_dir)
return None
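# Usage sketch (illustrative):
#   from astropy_helpers.git_helpers import get_git_devstr, update_git_devstr
#   get_git_devstr(sha=False, show_warning=False, path='.')  # e.g. '1234' (commit count)
#   update_git_devstr('1.2.dev', path='.')                   # e.g. '1.2.dev1234' in a git checkout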
| 33.324742 | 79 | 0.612065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,176 | 0.491261 |
b96b280416f0d557826ffa670a7914f2d45e5fc5 | 526 | py | Python | src/sot_talos_balance/test/test_feet_admittance.py | imaroger/sot-talos-balance | 5e56700b4e105273ecf6feb3474789beac469a77 | [
"BSD-2-Clause"
] | null | null | null | src/sot_talos_balance/test/test_feet_admittance.py | imaroger/sot-talos-balance | 5e56700b4e105273ecf6feb3474789beac469a77 | [
"BSD-2-Clause"
] | null | null | null | src/sot_talos_balance/test/test_feet_admittance.py | imaroger/sot-talos-balance | 5e56700b4e105273ecf6feb3474789beac469a77 | [
"BSD-2-Clause"
] | null | null | null | '''Test feet admittance control'''
from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_feet_admittance.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')
input("Wait before dumping the data")
runCommandClient('dump_tracer(robot.tracer)')
| 25.047619 | 97 | 0.752852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.505703 |
b96bb8e94e8bbfe556cc0ad3a314b6991573aa47 | 544 | py | Python | tests/test_db.py | davebryson/py-tendermint | ec6a38a54950d9841759b0f2ed93659b58948a03 | [
"Apache-2.0"
] | 24 | 2017-08-18T20:36:27.000Z | 2020-03-27T08:55:39.000Z | tests/test_db.py | davebryson/py-tendermint | ec6a38a54950d9841759b0f2ed93659b58948a03 | [
"Apache-2.0"
] | 6 | 2017-10-14T05:50:34.000Z | 2019-06-03T08:39:49.000Z | tests/test_db.py | davebryson/py-tendermint | ec6a38a54950d9841759b0f2ed93659b58948a03 | [
"Apache-2.0"
] | 5 | 2018-01-09T11:07:06.000Z | 2019-06-02T14:34:34.000Z | import os
from tendermint.db import VanillaDB
from tendermint.utils import home_dir
def test_database():
dbfile = home_dir('temp', 'test.db')
db = VanillaDB(dbfile)
db.set(b'dave',b'one')
result = db.get(b'dave')
assert(b'one' == result)
db.set(b'dave',b'two')
result = db.get(b'dave')
assert(b'two' == result)
assert(None == db.get(b'doesntexist'))
assert(db.exists(b'dave'))
db.delete(b'dave')
assert(db.exists(b'dave') == False)
if os.path.exists(dbfile):
os.remove(dbfile)
| 20.923077 | 42 | 0.621324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.1875 |
b96d766a7c5eab27eb3785b1277b6beccda7c9ed | 1,446 | py | Python | auth/tests/test_views.py | asb29/Redundant | ee816fd41f9217610bd11f757cf9175288723c70 | [
"MIT"
] | null | null | null | auth/tests/test_views.py | asb29/Redundant | ee816fd41f9217610bd11f757cf9175288723c70 | [
"MIT"
] | null | null | null | auth/tests/test_views.py | asb29/Redundant | ee816fd41f9217610bd11f757cf9175288723c70 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.test import Client
class RegisterTestCase(TestCase):
def test_register(self):
c = Client()
# on success redirects to /
response = c.post('/accounts/register/', {
'username': 'asdas',
'password1': 'asdasdasd12',
'password2': 'asdasdasd12'
})
self.assertRedirects(response, '/')
# passwords don't match
response = c.post('/accounts/register/', {
'username': 'asdasdasd1',
'password1': 'asdasdasd1',
'password2': 'asdasdasd2'
})
self.assertEquals(response.status_code, 200)
# username is empty
response = c.post('/accounts/register/', {
'username': '',
'password1': 'asdasdasd12',
'password2': 'asdasdasd12'
})
self.assertEquals(response.status_code, 200)
# no password
response = c.post('/accounts/register/', {
'username': 'asdasdasd',
'password1': '',
'password2': ''
})
self.assertEquals(response.status_code, 200)
# username and password are similar
response = c.post('/accounts/register/', {
'username': 'asdasdasd0',
'password1': 'asdasdasd1',
'password2': 'asdasdasd1'
})
self.assertEquals(response.status_code, 200)
| 30.125 | 52 | 0.53527 | 1,379 | 0.953665 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.368603 |
b96f6c5854c1e905c9ad5d8f08d016972c710a1f | 4,134 | py | Python | projects/OneNet/onenet/head.py | iFighting/OneNet | 6e33b46d2aa13131262833c75f0fd1c3d224ef03 | [
"MIT"
] | 2 | 2021-06-16T01:31:17.000Z | 2021-11-25T15:27:28.000Z | projects/OneNet/onenet/head.py | xieenze/OneNet | 3b06ad6832727cef4c0262389de4cdbb2a666197 | [
"MIT"
] | null | null | null | projects/OneNet/onenet/head.py | xieenze/OneNet | 3b06ad6832727cef4c0262389de4cdbb2a666197 | [
"MIT"
] | 1 | 2021-02-04T06:38:42.000Z | 2021-02-04T06:38:42.000Z | #
# Modified by Peize Sun
# Contact: [email protected]
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes
from .deconv import CenternetDeconv
class Head(nn.Module):
def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):
super().__init__()
# Build heads.
num_classes = cfg.MODEL.OneNet.NUM_CLASSES
d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]
activation = cfg.MODEL.OneNet.ACTIVATION
self.deconv = CenternetDeconv(cfg, backbone_shape)
        self.num_classes = num_classes
        self.d_model = d_model
self.activation = _get_activation_fn(activation)
self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)
self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)
self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1)
# Init parameters.
prior_prob = cfg.MODEL.OneNet.PRIOR_PROB
self.bias_value = -math.log((1 - prior_prob) / prior_prob)
self._reset_parameters()
def _reset_parameters(self):
# init all parameters.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
# initialize the bias for focal loss.
if p.shape[-1] == self.num_classes:
nn.init.constant_(p, self.bias_value)
def forward(self, features_list):
features = self.deconv(features_list)
locations = self.locations(features)[None]
feat = self.activation(self.feat1(features))
class_logits = self.cls_score(feat)
pred_ltrb = F.relu(self.ltrb_pred(feat))
pred_bboxes = self.apply_ltrb(locations, pred_ltrb)
return class_logits, pred_bboxes
def apply_ltrb(self, locations, pred_ltrb):
"""
:param locations: (1, 2, H, W)
:param pred_ltrb: (N, 4, H, W)
"""
pred_boxes = torch.zeros_like(pred_ltrb)
pred_boxes[:,0,:,:] = locations[:,0,:,:] - pred_ltrb[:,0,:,:] # x1
pred_boxes[:,1,:,:] = locations[:,1,:,:] - pred_ltrb[:,1,:,:] # y1
pred_boxes[:,2,:,:] = locations[:,0,:,:] + pred_ltrb[:,2,:,:] # x2
pred_boxes[:,3,:,:] = locations[:,1,:,:] + pred_ltrb[:,3,:,:] # y2
return pred_boxes
@torch.no_grad()
def locations(self, features, stride=4):
"""
Arguments:
features: (N, C, H, W)
Return:
locations: (2, H, W)
"""
h, w = features.size()[-2:]
device = features.device
shifts_x = torch.arange(
0, w * stride, step=stride,
dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, h * stride, step=stride,
dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
locations = locations.reshape(h, w, 2).permute(2, 0, 1)
return locations
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
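# Shape sketch (illustrative): with the default backbone_shape and an input image of
# size (N, 3, H, W), the deconv stack brings the features to stride 4, so the head
# returns class_logits of shape (N, num_classes, H/4, W/4) and pred_bboxes of shape
# (N, 4, H/4, W/4) holding (x1, y1, x2, y2) boxes decoded from the per-location ltrb offsets.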
| 32.296875 | 94 | 0.600871 | 3,152 | 0.762458 | 0 | 0 | 859 | 0.207789 | 0 | 0 | 830 | 0.200774 |
b96fae5c29fd446ea7199733a629bbe0f6190046 | 49,876 | py | Python | mermaid/utils.py | HastingsGreer/mermaid | bd13c5fc427eb8cd9054973a8eaaeb302078182d | [
"Apache-2.0"
] | 120 | 2019-10-29T23:53:02.000Z | 2022-03-30T02:59:58.000Z | mermaid/utils.py | AlexanderChristgau/mermaid | ba07883cc3cb5982e4655048a434b4495cb49c6d | [
"Apache-2.0"
] | 10 | 2019-11-05T09:28:35.000Z | 2022-01-09T19:12:51.000Z | mermaid/utils.py | AlexanderChristgau/mermaid | ba07883cc3cb5982e4655048a434b4495cb49c6d | [
"Apache-2.0"
] | 19 | 2019-11-10T13:34:39.000Z | 2022-03-13T20:30:10.000Z | """Various utility functions.
.. todo::
Reorganize this package in a more meaningful way.
"""
from __future__ import print_function
from __future__ import absolute_import
# from builtins import str
# from builtins import range
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from .libraries.modules.stn_nd import STN_ND_BCXYZ
from .data_wrapper import AdaptVal
from .data_wrapper import MyTensor
from . import smoother_factory as sf
from .data_wrapper import USE_CUDA
import numpy as np
from . import finite_differences as fd
import torch.nn as nn
import torch.nn.init as init
from . import module_parameters as pars
from .spline_interpolation import SplineInterpolation_ND_BCXYZ
import os
try:
from .libraries.functions.nn_interpolation import get_nn_interpolation
except ImportError:
print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '
'Some functionality may not be available.')
def my_hasnan(x):
"""Check if any input elements are NaNs.
:param x: numpy array
:return: True if NaNs are present, False else
"""
return (x != x).any()
def create_symlink_with_correct_ext(sf, tf):
abs_s = os.path.abspath(sf)
ext_s = os.path.splitext(abs_s)[1]
abs_t = os.path.abspath(tf)
root_t,ext_t = os.path.splitext(abs_t)
abs_t_with_right_ext = root_t + ext_s
if os.path.isfile(abs_t_with_right_ext):
if os.path.samefile(abs_s,abs_t_with_right_ext):
# nothing to do here, these are already the same file
return
else:
os.remove(abs_t_with_right_ext)
# now we can do the symlink
os.symlink(abs_s,abs_t_with_right_ext)
def combine_dict(d1,d2):
"""Creates a dictionary which has entries from both of them.
:param d1: dictionary 1
:param d2: dictionary 2
:return: resulting dictionary
"""
d = d1.copy()
d.update(d2)
return d
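# Example (illustrative): combine_dict({'a': 1}, {'b': 2}) -> {'a': 1, 'b': 2};
# for keys present in both dictionaries the value from d2 wins.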
def get_parameter_list_from_parameter_dict(pd):
"""Takes a dictionary which contains key value pairs for model parameters and converts it into a list of
parameters that can be used as an input to an optimizer.
:param pd: parameter dictionary
:return: list of parameters
"""
pl = []
for key in pd:
pl.append(pd[key])
return pl
def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd):
"""Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys
based on memory id.
:param pd: parameter dictionary
:return: tuple of (parameter_list, name_dictionary)
"""
par_to_name_dict = dict()
pl = []
for key in pd:
pl.append(pd[key])
par_to_name_dict[pd[key]] = key
return pl, par_to_name_dict
def remove_infs_from_variable(v):
# 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor
# 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor
# 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor
# todo: maybe find a cleaner way of handling this
# this is to make sure that subsequent sums work (hence will be smaller than it could be,
# but values of this size should not occur in practice anyway
sz = v.size()
reduction_factor = np.prod(np.array(sz))
condition = True
if type(v.data) == torch.cuda.FloatTensor or v.data.dtype==torch.float32:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float32').min))/reduction_factor,
max=(np.asscalar(np.finfo('float32').max))/reduction_factor)
    elif v.data.dtype == torch.float64 or type(v.data) == torch.cuda.DoubleTensor:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float64').min))/reduction_factor,
max=(np.asscalar(np.finfo('float64').max))/reduction_factor)
    elif v.data.dtype == torch.float16 or type(v.data) == torch.cuda.HalfTensor:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float16').min))/reduction_factor,
max=(np.asscalar(np.finfo('float16').max))/reduction_factor)
else:
raise ValueError('Unknown data type: ' + str( type(v.data)))
def lift_to_dimension(A, dim):
"""Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim > dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim == dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape))
def get_dim_of_affine_transform(Ab):
"""Returns the number of dimensions corresponding to an affine transformation of the
form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply
[a1;a2;a3;b], i.e., all columns stacked on top of each other.
:param Ab: parameter vector
:return: dimensionality of transform (1,2,or 3)
"""
nr = len(Ab)
if nr==2:
return 1
elif nr==6:
return 2
elif nr==12:
return 3
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity(Ab):
"""Sets the affine transformation as given by the column vector Ab to the identity transform.
:param Ab: Affine parameter vector (will be overwritten with the identity transform)
:return:
"""
dim = get_dim_of_affine_transform(Ab)
if dim==1:
Ab.zero_()
Ab[0]=1.
elif dim==2:
Ab.zero_()
Ab[0]=1.
Ab[3]=1.
elif dim==3:
Ab.zero_()
Ab[0]=1.
Ab[4]=1.
Ab[8]=1.
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity_multiN(Ab):
"""Set the affine transforms to the identity (in the case of arbitrary batch size).
:param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans.
:return:
"""
sz = Ab.size()
nr_of_images = sz[0]
for nrI in range(nr_of_images):
set_affine_transform_to_identity(Ab[nrI, :])
def get_inverse_affine_param(Ab):
"""Computes inverse of affine transformation.
Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb
:param Ab: B x pars (batch size x param. vector)
:return: Inverse of affine parameters
"""
dim =0
if Ab.shape[1] == 2:
dim = 1
elif Ab.shape[1] == 6:
dim = 2
elif Ab.shape[1] == 12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2)
Ab_inv = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_inv = torch.inverse(Ab[n, :, :dim])
Ab_inv[n, :, :dim] = tm_inv
Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim])
inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1)
return inv_affine_param
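# Example (illustrative), 1D case: Ab = [[2., 3.]] encodes y = 2*x + 3 and the returned
# inverse is [[0.5, -1.5]], since 0.5*(2*x + 3) - 1.5 = x.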
def update_affine_param(Ab, Cd):
"""Update affine parameters.
Formally: C(Ax+b)+d = CAx+Cb+d
:param Ab: B x pars (batch size x param. vector)
:return: Updated affine parameters
"""
dim = 0
if Ab.shape[1]==2:
dim = 1
elif Ab.shape[1]==6:
dim = 2
elif Ab.shape[1]==12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2)
Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2)
updated_param = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim])
updated_param[n,:,:dim] = tm_param
updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim]
updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1)
return updated_param
def apply_affine_transform_to_map(Ab,phi):
"""Applies an affine transform to a map.
:param Ab: affine transform parameter column vector
:param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed map
"""
sz = phi.size()
dim = len(sz) - 1
if dim not in [1,2,3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
phiR = MyTensor(sz).zero_().type_as(phi)
if dim == 1:
phiR = phi * Ab[0] + Ab[1]
elif dim == 2:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2
elif dim == 3:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9]
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10]
phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11]
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
return phiR
def apply_affine_transform_to_map_multiNC(Ab,phi):
"""Applies an affine transform to maps (for arbitrary batch size).
:param Ab: affine transform parameter column vectors (batch size x param. vector)
:param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed maps
"""
sz = phi.size()
dim = get_dim_of_affine_transform(Ab[0,:])
nr_of_images = Ab.size()[0]
if nr_of_images != sz[0]:
raise ValueError('Incompatible number of affine transforms')
if dim != len(sz)-2:
raise ValueError('Incompatible number of affine transforms')
phiR = MyTensor(sz).zero_().type_as(phi)
for nrI in range(nr_of_images):
phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...])
return phiR
def compute_normalized_gaussian(X, mu, sig):
"""Computes a normalized Gaussian.
:param X: map with coordinates at which to evaluate
:param mu: array indicating the mean
:param sig: array indicating the standard deviations for the different dimensions
:return: Normalized Gaussian evaluated at coordinates in X
Example::
>>> mu, sig = [1,1], [1,1]
>>> X = [0,0]
    >>> print(compute_normalized_gaussian(X, mu, sig))
"""
dim = len(mu)
if dim == 1:
g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.)))
g = g/g.sum()
return g
elif dim == 2:
g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.))
- np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)))
g = g/g.sum()
return g
elif dim == 3:
g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.))
-np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))
-np.power(X[2,:, :, :] - mu[2], 2.) / (2 * np.power(sig[2], 2.)))
g = g / g.sum()
return g
else:
raise ValueError('Can only compute Gaussians in dimensions 1-3')
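# Minimal usage sketch (an illustration, not from the original module): X is a
# coordinate map with the dimension as its leading axis, e.g. as produced by
# identity_map defined further below.
def _example_normalized_gaussian_1d():
    X = identity_map([11], np.array([0.1]))                   # coordinates 0.0 .. 1.0, shape (1, 11)
    g = compute_normalized_gaussian(X, mu=[0.5], sig=[0.2])
    return g.sum()                                            # ~1.0 by construction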
def _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
# return get_warped_label_map(I0,phi,spacing)
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size XxYxZ
:param phi: map for the warping, size dimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size XxYxZ
"""
# implements this by creating a different view (effectively adding dimensions)
Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))),
phi.view(torch.Size([1] + list(phi.size()))),
spacing,
spline_order,
zero_boundary,
use_01_input)
return Iw.view(I0.size())
def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size BxCxXxYxZ
:param phi: map for the warping, size BxdimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size BxCxXxYxZ
"""
dim = I0.dim()-2
if dim == 1:
return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 2:
return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 3:
return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
else:
raise ValueError('Images can only be warped in dimensions 1 to 3')
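# Illustrative sketch (not part of the original module; assumes the default tensor
# type matches the CPU inputs used here): warping with the identity map returns the
# input image up to interpolation error.
def _example_identity_warp_2d():
    sz = np.array([1, 1, 16, 16])
    spacing = 1. / (sz[2:] - 1)
    I = torch.rand(*[int(s) for s in sz])
    phi = torch.from_numpy(identity_map_multiN(sz, spacing)).float()
    return compute_warped_image_multiNC(I, phi, spacing, spline_order=1)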
def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize):
"""Computes spacing for the low-res parametrization from image spacing.
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of low re parameterization
:return: returns spacing of low res parameterization
"""
#todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1)
def _get_low_res_size_from_size(sz, factor):
"""Returns the corresponding low-res size from a (high-res) sz.
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None) or (factor >= 1):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return np.array(sz)
else:
low_res_sz = np.array(sz)
low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16')
return low_res_sz
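# Worked example (illustration only): for an image of size [1, 1, 64, 64] and a
# factor of 0.5 the low-res size is [1, 1, 32, 32] and the spacing scales by (64-1)/(32-1).
def _example_low_res_size_and_spacing():
    sz = np.array([1, 1, 64, 64])
    low_sz = _get_low_res_size_from_size(sz, factor=0.5)                            # -> [1, 1, 32, 32]
    low_spacing = _get_low_res_spacing_from_spacing(1. / (sz[2:] - 1), sz, low_sz)  # -> 1/63 * 63/31
    return low_sz, low_spacing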
def _compute_low_res_image(I, spacing, low_res_size, spline_order):
import mermaid.image_sampling as IS
sampler = IS.ResampleImage()
low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order)
return low_res_image
def individual_parameters_to_model_parameters(ind_pars):
model_pars = dict()
if type(ind_pars) == type(dict()):
# should already be in the right format
model_pars = ind_pars
else:
# if ind_pars is not a dictionary assume that they come from the optimizer
# (i.e., list and each list element has a dictionary with keys 'name' and 'model_params'
for par in ind_pars:
model_pars[par['name']] = par['model_params']
return model_pars
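# Usage sketch (illustration only): the optimizer-style input is a list of
# dictionaries with 'name' and 'model_params' keys, as described above.
def _example_individual_to_model_parameters():
    ind = [{'name': 'm', 'model_params': torch.zeros(3)},
           {'name': 'sm_weights', 'model_params': torch.ones(2)}]
    return individual_parameters_to_model_parameters(ind)   # -> {'m': ..., 'sm_weights': ...}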
def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, BxCxXxYxZ
:param I: image, BxCxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
nrOfI = sz[0] # number of images
m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI) # attention that the second dimension here is image dim, not nrOfC
nrOfC = sz[1]
for c in range(nrOfC): # loop over all the channels and add the results
m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...],
I[:, c, ...],
nrOfI,
sz[2::],
spacing)
return m
def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, batchxXxYxZ
    :param I: image, batchxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
fdt = fd.FD_torch(spacing)
dim = len(sz)
m = create_ND_vector_field_variable_multiN(sz, nrOfI)
if dim == 1:
m[:, 0, :] = fdt.dXc(I)*lam
elif dim == 2:
m[:, 0, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :] = fdt.dYc(I)*lam
elif dim == 3:
m[:, 0, :, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :, :] = fdt.dYc(I)*lam
m[:, 2, :, :, :] = fdt.dZc(I)*lam
else:
raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3')
return m
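# Sketch (illustration only; tensors are created through the module-level MyTensor so
# the device matches the other helpers): for a constant image the spatial gradient
# vanishes, so the vector momentum m = lambda * grad(I) is ~0.
def _example_vector_momentum_1d():
    sz = np.array([1, 1, 32])
    spacing = 1. / (sz[2:] - 1)
    I = MyTensor(1, 1, 32).fill_(1.)
    lam = MyTensor(1, 1, 32).fill_(1.)
    return compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing)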
def create_ND_vector_field_variable_multiN(sz, nr_of_images=1):
"""
Create vector field torch Variable of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nr_of_images: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nr_of_images, dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0., 1e-7)
def create_ND_vector_field_variable(sz):
"""Create vector field torch Variable of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:return: returns vector field of size dimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0.,1e-7)
def create_vector_parameter(nr_of_elements):
"""Creates a vector parameters with a specified number of elements.
:param nr_of_elements: number of vector elements
:return: returns the parameter vector
"""
return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7))
def create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False):
"""Create vector field torch Parameter of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI, dim]+list(csz))
if get_field_from_external_network:
tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7)
tmp.requires_grad = True
else:
tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
return tmp
def create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False):
"""
    Create the multi-Gaussian smoother weights as a torch Parameter of given size
    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nrOfI: number of images
    :return: returns a weight field of size nrOfI x nr_of_weights x X x Y x Z
"""
nr_of_mg_weights = len(gaussian_std_weights)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nr_of_mg_weights]+list(csz))
weights = torch.empty(*csz)
# set the default
if sched =='w_K_w':
gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights]
for g in range(nr_of_mg_weights):
weights[:, g, ...] = gaussian_std_weights[g]
tmp = AdaptVal(weights)
if get_preweight_from_network:
tmp.requires_grad = True
else:
tmp = Parameter(tmp)
return tmp
def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1):
"""
    Create scalar field torch Parameter of given size
    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nrOfI: number of images
    :param nrOfC: number of channels
    :return: returns scalar field of size nrOfIxnrOfCxXxYxZ
"""
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nrOfC]+list(csz))
return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
def centered_identity_map_multiN(sz, spacing, dtype='float32'):
"""
Create a centered identity map (shifted so it is centered around 0)
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz) - 2
nrOfI = sz[0]
if dim == 1:
id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype)
return id
def identity_map_multiN(sz,spacing,dtype='float32'):
"""
Create an identity map
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz)-2
nrOfI = int(sz[0])
if dim == 1:
id = np.zeros([nrOfI,1,sz[2]],dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n,...] = identity_map(sz[2::],spacing,dtype=dtype)
return id
def centered_identity_map(sz, spacing, dtype='float32'):
"""
Returns a centered identity map (with 0 in the middle) if the sz is odd
Otherwise shifts everything by 0.5*spacing
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim == 1:
id = np.mgrid[0:sz[0]]
elif dim == 2:
id = np.mgrid[0:sz[0], 0:sz[1]]
elif dim == 3:
id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array(id.astype(dtype))
if dim == 1:
id = id.reshape(1, sz[0]) # add a dummy first index
for d in range(dim):
id[d] *= spacing[d]
if sz[d]%2==0:
#even
id[d] -= spacing[d]*(sz[d]//2)
else:
#odd
id[d] -= spacing[d]*((sz[d]+1)//2)
# and now store it in a dim+1 array
if dim == 1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0, :] = id[0]
elif dim == 2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0, :, :] = id[0]
idnp[1, :, :] = id[1]
elif dim == 3:
idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0, :, :, :] = id[0]
idnp[1, :, :, :] = id[1]
idnp[2, :, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
return idnp
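# Worked example (illustration only): for an even size the map is shifted by half
# a voxel relative to a symmetric grid, e.g.
def _example_centered_identity_map_1d():
    return centered_identity_map([4], np.array([1.0]))   # -> [[-2., -1., 0., 1.]]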
#
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
def identity_map(sz,spacing,dtype='float32'):
"""
Returns an identity map.
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim==1:
id = np.mgrid[0:sz[0]]
elif dim==2:
id = np.mgrid[0:sz[0],0:sz[1]]
elif dim==3:
id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array( id.astype(dtype) )
if dim==1:
id = id.reshape(1,sz[0]) # add a dummy first index
for d in range(dim):
id[d]*=spacing[d]
#id[d]*=2./(sz[d]-1)
#id[d]-=1.
# and now store it in a dim+1 array
if dim==1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0,:] = id[0]
elif dim==2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0,:, :] = id[0]
idnp[1,:, :] = id[1]
elif dim==3:
idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0,:, :, :] = id[0]
idnp[1,:, :, :] = id[1]
idnp[2,:, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
return idnp
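# Worked example (illustration only): channel d of the returned array holds the
# d-th coordinate scaled by spacing[d].
def _example_identity_map_2d():
    return identity_map([2, 3], np.array([0.5, 1.0]))
    # -> shape (2, 2, 3):
    #    channel 0 (x): [[0., 0., 0.], [0.5, 0.5, 0.5]]
    #    channel 1 (y): [[0., 1., 2.], [0., 1., 2.]]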
def omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.ones(*mask_sz))*mask_value
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
return mask.detach()
def momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.zeros(*mask_sz))
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
if pow ==2:
mask = mask**2
if pow ==3:
mask = mask*mask*mask
return mask
# def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
def get_single_gaussian_smoother(gaussian_std,sz,spacing):
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] = gaussian_std
s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params)
return s_m
def get_warped_label_map(label_map, phi, spacing, sched='nn'):
if sched == 'nn':
warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing,spline_order=0,zero_boundary=True)
        # sanity check: nearest-neighbor warping should keep label values (near-)integer
        assert abs(torch.sum(warped_label_map.data - warped_label_map.data.round())) < 0.1, "nn interpolation is not precise"
    else:
        raise ValueError("The label warping method '%s' is not implemented" % sched)
return warped_label_map
def t2np(v):
"""
Takes a torch array and returns it as a numpy array on the cpu
:param v: torch array
:return: numpy array
"""
return (v.detach()).cpu().numpy()
def cxyz_to_xyzc(v):
    """
    Moves the channel dimension from position 1 to the last position,
    i.e., BxCxXxY(xZ) becomes BxXxY(xZ)xC.
    :param v: torch tensor in BxCxXxY(xZ) format
    :return: torch tensor in BxXxY(xZ)xC format
    """
dim = len(v.shape)-2
if dim ==2:
v = v.permute(0,2,3,1)
if dim ==3:
v = v.permute(0,2,3,4,1)
return v
def get_scalar(v):
if isinstance(v, float):
return v
elif isinstance(v, np.ndarray) and v.size == 1:
return float(v)
def checkNan(x):
""""
input should be list of Variable
"""
return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x]
def noramlized_spacing_to_smallest(spacing):
min_sp = np.min(spacing)
spacing[spacing>min_sp]=min_sp
return spacing
def time_warped_function(f):
def __time_warped_function(input=None):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
output = f(input)
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
print(start.elapsed_time(end))
return output
return __time_warped_function
def interoplate_boundary_right(tensor):
    """Linearly extrapolate the right-most entry along each spatial axis (in place)."""
    dim = len(tensor.shape) - 2
    if dim == 1:
        tensor[:, :, -1] = tensor[:, :, -2] + tensor[:, :, -2] - tensor[:, :, -3]
    if dim == 2:
        tensor[:, :, -1, :] = tensor[:, :, -2, :] + tensor[:, :, -2, :] - tensor[:, :, -3, :]
        tensor[:, :, :, -1] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3]
    if dim == 3:
        tensor[:, :, -1, :, :] = tensor[:, :, -2, :, :] + tensor[:, :, -2, :, :] - tensor[:, :, -3, :, :]
        tensor[:, :, :, -1, :] = tensor[:, :, :, -2, :] + tensor[:, :, :, -2, :] - tensor[:, :, :, -3, :]
        tensor[:, :, :, :, -1] = tensor[:, :, :, :, -2] + tensor[:, :, :, :, -2] - tensor[:, :, :, :, -3]
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
:param I: B C X Y Z
:param spacing: spx spy spz
:param desiredSize: B C X Y Z
:param spline_order:
:param zero_boundary:
:param identity_map:
:return:
"""
if spacing is None:
img_sz = I.shape[2:]
spacing = 1. / (np.array(img_sz) - 1)
if identity_map is not None: # todo will remove, currently fix for symmetric training
if I.shape[0] != identity_map.shape[0]:
n_batch = I.shape[0]
desiredSize = desiredSize.copy()
desiredSize[0] = n_batch
identity_map = identity_map[:n_batch]
resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order,
zero_boundary=zero_boundary, identity_map=identity_map)
return resampled
def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
Resample an image to a given desired size
:param I: Input image (expected to be of BxCxXxYxZ format)
:param spacing: array describing the spatial spacing
:param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D)
:return: returns a tuple: the downsampled image, the new spacing after downsampling
"""
desiredSize = desiredSize[2:]
is_numpy = False
if not isinstance(I, torch.Tensor):
I = torch.Tensor(I)
is_numpy = True
sz = np.array(list(I.size()))
# check that the batch size and the number of channels is the same
nrOfI = sz[0]
nrOfC = sz[1]
desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize))
    newspacing = spacing * ((sz[2::].astype('float') - 1.) / (desiredSizeNC[2::].astype('float') - 1.))
if identity_map is not None:
idDes = identity_map
else:
idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing)))
# now use this map for resampling
ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary)
return ID if not is_numpy else ID.numpy(), newspacing
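# Sketch (illustration only; assumes the default tensor type matches the CPU input):
# halve the in-plane resolution of a 16x16 image.
def _example_resample_image():
    I = torch.rand(1, 1, 16, 16)
    spacing = np.array([1. / 15, 1. / 15])
    I_low, new_spacing = resample_image(I, spacing, desiredSize=[1, 1, 8, 8])
    return I_low.shape, new_spacing   # torch.Size([1, 1, 8, 8]), spacing * 15. / 7.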
def get_res_size_from_size(sz, factor):
"""
Returns the corresponding low-res size from a (high-res) sz
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return sz
else:
lowResSize = np.array(sz)
if not isinstance(factor, list):
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')
else:
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16')
if lowResSize[-1] % 2 != 0:
lowResSize[-1] -= 1
print(
'\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')
return lowResSize
def get_res_spacing_from_spacing(spacing, sz, lowResSize):
"""
Computes spacing for the low-res parameterization from image spacing
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of low re parameterization
:return: returns spacing of low res parameterization
"""
# todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1)
########################################## Adaptive Net ###################################################3
def space_normal(tensors, std=0.1):
"""
space normalize for the net kernel
:param tensor:
:param mean:
:param std:
:return:
"""
if isinstance(tensors, Variable):
space_normal(tensors.data, std=std)
return tensors
for n in range(tensors.size()[0]):
for c in range(tensors.size()[1]):
dim = tensors[n][c].dim()
sz = tensors[n][c].size()
mus = np.zeros(dim)
stds = std * np.ones(dim)
print('WARNING: What should the spacing be here? Needed for new identity map code')
raise ValueError('Double check the spacing here before running this code')
spacing = np.ones(dim)
centered_id = centered_identity_map(sz,spacing)
g = compute_normalized_gaussian(centered_id, mus, stds)
tensors[n,c] = torch.from_numpy(g)
def weights_init_uniform(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.uniform(m.weight.data, 0.038, 0.042)
elif classname.find('Linear') != -1:
init.uniform(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
space_normal(m.weight.data)
elif classname.find('Linear') != -1:
space_normal(m.weight.data)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_rd_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.normal(m.weight.data)
elif classname.find('Linear') != -1:
init.normal(m.weight.data)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
print(classname)
if classname.find('Conv') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'rd_normal':
net.apply(weights_init_rd_normal)
elif init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'uniform':
net.apply(weights_init_uniform)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
def organize_data(moving, target, sched='depth_concat'):
if sched == 'depth_concat':
input = torch.cat([moving, target], dim=1)
elif sched == 'width_concat':
input = torch.cat((moving, target), dim=3)
elif sched == 'list_concat':
input = torch.cat((moving.unsqueeze(0),target.unsqueeze(0)),dim=0)
elif sched == 'difference':
input = moving-target
return input
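# Sketch (illustration only): 'depth_concat' stacks moving and target along the channel axis.
def _example_organize_data():
    moving = torch.zeros(1, 1, 8, 8)
    target = torch.ones(1, 1, 8, 8)
    return organize_data(moving, target, sched='depth_concat').shape   # -> (1, 2, 8, 8)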
def bh(m,gi,go):
print("Grad Input")
print((torch.sum(gi[0].data), torch.sum(gi[1].data)))
print("Grad Output")
print(torch.sum(go[0].data))
return gi[0], gi[1], gi[2]
class ConvBnRel(nn.Module):
# conv + bn (optional) + relu
def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,
bn=False, reverse=False, bias=False):
super(ConvBnRel, self).__init__()
padding = int((kernel_size - 1) // 2) if same_padding else 0
if not reverse:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)
else:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding,bias=bias)
#y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
#When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants.
self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None
if active_unit == 'relu':
self.active_unit = nn.ReLU(inplace=True)
elif active_unit == 'elu':
self.active_unit = nn.ELU(inplace=True)
else:
self.active_unit = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.active_unit is not None:
x = self.active_unit(x)
return x
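# Usage sketch for the block above (shapes are illustrative assumptions, not from the original code).
def _example_conv_bn_rel():
    block = ConvBnRel(in_channels=3, out_channels=8, kernel_size=3, same_padding=True, bn=True)
    x = torch.rand(2, 3, 32, 32)
    return block(x).shape   # -> torch.Size([2, 8, 32, 32])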
class FcRel(nn.Module):
# fc+ relu(option)
def __init__(self, in_features, out_features, active_unit='relu'):
super(FcRel, self).__init__()
self.fc = nn.Linear(in_features, out_features)
if active_unit == 'relu':
self.active_unit = nn.ReLU(inplace=True)
elif active_unit == 'elu':
self.active_unit = nn.ELU(inplace=True)
else:
self.active_unit = None
def forward(self, x):
x = self.fc(x)
if self.active_unit is not None:
x = self.active_unit(x)
return x
class AdpSmoother(nn.Module):
"""
a simple conv. implementation, generate displacement field
"""
def __init__(self, inputs, dim, net_sched=None):
# settings should include [using_bias, using bn, using elu]
# inputs should be a dictionary could contain ['s'],['t']
super(AdpSmoother, self).__init__()
self.dim = dim
self.net_sched = 'm_only'
self.s = inputs['s'].detach()
self.t = inputs['t'].detach()
self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())]*dim, 1), requires_grad = True)
self.get_net_sched()
#self.net.register_backward_hook(bh)
def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False , kernel_size=5):
# return the self.net and self.net_input
padding_size = (kernel_size-1)//2
if self.net_sched == 'm_only':
if debugging:
self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False,groups=2)
else:
net = \
[ConvBnRel(self.dim, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20,self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched =='m_f_s':
if debugging:
self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim +1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_d_s':
if debugging:
self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_f_s_t':
if debugging:
self.net = nn.Conv2d(self.dim+2, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_d_s_f_t':
if debugging:
self.net = nn.Conv2d(self.dim + 2, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
def prepare_data(self, m, new_s):
input=None
if self.net_sched == 'm_only':
input = m
elif self.net_sched == 'm_f_s':
input = organize_data(m,self.s,sched='depth_concat')
elif self.net_sched == 'm_d_s':
input = organize_data(m, new_s, sched='depth_concat')
        elif self.net_sched == 'm_f_s_t':
            input = organize_data(m, self.s, sched='depth_concat')
            input = organize_data(input, self.t, sched='depth_concat')
elif self.net_sched == 'm_d_s_f_t':
input = organize_data(m, new_s, sched='depth_concat')
input = organize_data(input, self.t, sched='depth_concat')
return input
def forward(self, m,new_s=None):
m = m * self.mask
input = self.prepare_data(m,new_s)
x= input
x = self.net(x)
return x
| 34.805304 | 130 | 0.602675 | 6,600 | 0.132328 | 0 | 0 | 0 | 0 | 0 | 0 | 17,362 | 0.348103 |
b96fca03cef0164231c4fa09bc83db6c5b2aa7db | 1,093 | py | Python | examples/io/plot_read_evoked.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2021-01-04T08:45:56.000Z | 2021-05-19T12:25:59.000Z | examples/io/plot_read_evoked.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 28 | 2020-05-07T00:58:34.000Z | 2020-08-29T23:02:17.000Z | examples/io/plot_read_evoked.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T13:48:00.000Z | 2019-07-10T16:02:11.000Z | """
==================================
Reading and writing an evoked file
==================================
This script shows how to read and write evoked datasets.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from mne import read_evokeds
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
###############################################################################
# Show result as a butterfly plot:
# By using exclude=[] bad channels are not excluded and are shown in red
evoked.plot(exclude=[], time_unit='s')
# Show result as a 2D image (x: time, y: channels, color: amplitude)
evoked.plot_image(exclude=[], time_unit='s')
###############################################################################
# Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked
# responses to a file.
| 29.540541 | 79 | 0.569076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.6871 |
b970d836b7397be4bc4d63762c0eec8adfb90a91 | 611 | py | Python | source/monkeyPatches/__init__.py | lukaszgo1/nvda | 38a2efd1e1bff7db4471cb7afa03ab1590b7adef | [
"bzip2-1.0.6"
] | 19 | 2016-05-11T05:15:31.000Z | 2022-03-17T12:40:10.000Z | source/monkeyPatches/__init__.py | lukaszgo1/nvda | 38a2efd1e1bff7db4471cb7afa03ab1590b7adef | [
"bzip2-1.0.6"
] | 307 | 2015-08-27T11:22:33.000Z | 2022-03-29T10:43:34.000Z | source/monkeyPatches/__init__.py | lukaszgo1/nvda | 38a2efd1e1bff7db4471cb7afa03ab1590b7adef | [
"bzip2-1.0.6"
] | 14 | 2016-03-28T07:31:49.000Z | 2022-03-30T04:56:35.000Z | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2021 NV Access Limited
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
from . import wxMonkeyPatches
applyWxMonkeyPatches = wxMonkeyPatches.apply
def applyMonkeyPatches():
# Apply several monkey patches to comtypes
# F401 - imported but unused: Patches are applied during import
from . import comtypesMonkeyPatches # noqa: F401
# Apply patches to Enum, prevent cyclic references on ValueError during construction
from . import enumPatches
enumPatches.replace__new__()
| 30.55 | 86 | 0.761047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.633388 |
b970f8ccb56e24dd8d65fd92869bbf7790f6e611 | 5,298 | py | Python | yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z | from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
smuggle_url,
str_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
class NineNowIE(InfoExtractor):
IE_NAME = '9now.com.au'
_VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)'
_GEO_COUNTRIES = ['AU']
_TESTS = [{
# clip
'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc',
'md5': '17cf47d63ec9323e562c9957a968b565',
'info_dict': {
'id': '16801',
'ext': 'mp4',
'title': 'St. Kilda\'s Joey Montagna on the potential for a player\'s strike',
'description': 'Is a boycott of the NAB Cup "on the table"?',
'uploader_id': '4460760524001',
'upload_date': '20160713',
'timestamp': 1468421266,
},
'skip': 'Only available in Australia',
}, {
# episode
'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19',
'only_matching': True,
}, {
# DRM protected
'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1',
'only_matching': True,
}, {
# episode of series
'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3',
'info_dict': {
'id': '6249614030001',
'title': 'Episode 3',
'ext': 'mp4',
'season_number': 3,
'episode_number': 3,
'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.',
'uploader_id': '4460760524001',
'timestamp': 1619002200,
'upload_date': '20210421',
},
'expected_warnings': ['Ignoring subtitle tracks'],
'params':{
'skip_download': True,
}
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
page_data = self._parse_json(self._search_regex(
r'window\.__data\s*=\s*({.*?});', webpage,
'page data', default='{}'), display_id, fatal=False)
if not page_data:
page_data = self._parse_json(self._parse_json(self._search_regex(
r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;',
webpage, 'page data'), display_id), display_id)
for kind in ('episode', 'clip'):
current_key = page_data.get(kind, {}).get(
'current%sKey' % kind.capitalize())
if not current_key:
continue
cache = page_data.get(kind, {}).get('%sCache' % kind, {})
if not cache:
continue
common_data = {
'episode': (cache.get(current_key) or list(cache.values())[0])[kind],
'season': (cache.get(current_key) or list(cache.values())[0]).get('season', None)
}
break
else:
raise ExtractorError('Unable to find video data')
if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool):
self.report_drm(display_id)
brightcove_id = try_get(
common_data, lambda x: x['episode']['video']['brightcoveId'], compat_str) or 'ref:%s' % common_data['episode']['video']['referenceId']
video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id
title = try_get(common_data, lambda x: x['episode']['name'], compat_str)
season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int)
episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int)
timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], compat_str))
release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], compat_str))
thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {}
thumbnails = [{
'id': thumbnail_id,
'url': thumbnail_url,
'width': int_or_none(thumbnail_id[1:]),
} for thumbnail_id, thumbnail_url in thumbnails_data.items()]
return {
'_type': 'url_transparent',
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': self._GEO_COUNTRIES}),
'id': video_id,
'title': title,
'description': try_get(common_data, lambda x: x['episode']['description'], compat_str),
'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000),
'thumbnails': thumbnails,
'ie_key': 'BrightcoveNew',
'season_number': season_number,
'episode_number': episode_number,
'timestamp': timestamp,
'release_date': release_date,
}
| 43.073171 | 146 | 0.575123 | 5,058 | 0.9547 | 0 | 0 | 0 | 0 | 0 | 0 | 1,871 | 0.353152 |
b97242dec299cf214174fe1ceb1c2d4c7e16b595 | 4,783 | py | Python | apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 766e36c9e10fe4efd847c3f77c3b38974c89eab1 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T01:37:42.000Z | 2020-05-05T01:37:42.000Z | apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 766e36c9e10fe4efd847c3f77c3b38974c89eab1 | [
"BSD-3-Clause"
] | 1 | 2018-06-24T18:56:56.000Z | 2018-06-24T18:56:56.000Z | apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 766e36c9e10fe4efd847c3f77c3b38974c89eab1 | [
"BSD-3-Clause"
] | 1 | 2020-07-03T00:37:20.000Z | 2020-07-03T00:37:20.000Z | import torch
from torch.autograd import Variable
from torch.autograd.function import Function, once_differentiable
import apex_C
def check_contig_cuda(tensors, names):
for tensor, name in zip(tensors, names):
if not tensor.is_contiguous():
raise RuntimeError(name+" with size {} is not contiguous"
.format(tensor.size()))
if not tensor.is_cuda:
raise RuntimeError(name+".is_cuda = False."
"Currently, only cuda tensors are supported.")
class Fused_Weight_Norm(Function):
"""
Custom autograd function that implements weight norm, as presented in
`<https://arxiv.org/abs/1602.07868>`_,
along a tensor's slowest or
fastest dimension using fused kernel launches for the forward and backward passes.
Accepts fp32 or fp16 input; the output type will match the input type.
Within the kernels, all calculations are performed in fp32 for numerical stability, regardless
of input/output precision.
"""
@staticmethod
def forward(ctx, input, g, dim=0):
"""
Args:
input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous.
g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``.
dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported.
Returns:
Output tensor corresponding to **w** in the paper. Output type and precision will match
type and precision of ``input``.
"""
# torch.cuda.nvtx.range_push("FusedNorm.forward, input.size() = {}"
# .format(input.size()))
check_contig_cuda((input,g),("input","g"))
"""
This is ok, new() treats a torch.Size object properly.
No need to unpack with an asterisk via new(*input.size()).
"""
output = input.new(input.size()).contiguous()
"""
For output with size (slow, faster, faster, ...fastest), we want
norms with size (slow, 1, 1, ...1), so that if you want retrieve norms
and apply the same normalizing factors to another Tensor "t" with the
same size as output, "t/norms" will broadcast each element of norms
across the corresponding slowest dim of t.
"""
if dim == 0:
norm_size = (output.size(0),) + (1,)*(output.dim() - 1)
elif dim == output.dim() - 1:
norm_size = (1,)*(output.dim() - 1) + (output.size(-1),)
else:
raise RuntimeError("Currently, Fused_Weight_Norm only supports first or last dimension.")
norms = torch.cuda.FloatTensor(*norm_size).contiguous()
"""
Beware: If you call the following:
norms = torch.cuda.FloatTensor(norm_size).contiguous()
the constructor sees a tuple:
FloatTensor( (output_size(0),1,1,...) )
and creates a 1D tensor with values from the tuple:
[output_size(0),1,1,...].
"""
apex_C.weight_norm_fwd(output, norms, input, g, dim)
ctx.save_for_backward(input, g)
# save_for_backward can only save input or output tensors,
# use ctx state to save the norms and dimension:
ctx.norms = norms
ctx.dim = dim
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
"""
Args:
grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance.
Returns:
Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_input``.
"""
check_contig_cuda((grad_output), ("grad_output"))
savedInput, savedg = ctx.saved_tensors
savedNorms = ctx.norms
# We expect that these .contiguous() calls will be no-ops. They're present for safety.
grad_output_contig = grad_output.contiguous()
grad_input = grad_output_contig.new(grad_output.size()).contiguous()
grad_g = savedg.new(savedg.size()).contiguous()
apex_C.weight_norm_bwd(grad_input,
grad_g,
grad_output_contig,
savedInput,
savedg,
savedNorms,
ctx.dim)
return grad_input, grad_g, None
| 41.95614 | 175 | 0.604223 | 4,238 | 0.886055 | 0 | 0 | 3,733 | 0.780473 | 0 | 0 | 2,826 | 0.590843 |
b9724b70833f729e47c38eb018294247250b7282 | 23,312 | py | Python | bzt/modules/grinder.py | gerardorf/taurus | 610872b4cf70af31d79a346db1aebd3466310d77 | [
"Apache-2.0"
] | 1 | 2019-01-15T17:23:58.000Z | 2019-01-15T17:23:58.000Z | bzt/modules/grinder.py | gerardorf/taurus | 610872b4cf70af31d79a346db1aebd3466310d77 | [
"Apache-2.0"
] | null | null | null | bzt/modules/grinder.py | gerardorf/taurus | 610872b4cf70af31d79a346db1aebd3466310d77 | [
"Apache-2.0"
] | null | null | null | """
Module holds all stuff regarding Grinder tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import time
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.java import TaurusJavaHelper
from bzt.requests_model import HTTPRequest
from bzt.six import iteritems
from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR
class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
"""
Grinder executor module
"""
def __init__(self):
super(GrinderExecutor, self).__init__()
self.script = None
self.exec_id = "grinder-bzt-%s" % id(self)
self.properties_file = None
self.kpi_file = None
self.cmd_line = None
self.process = None
self.end_time = None
self.retcode = None
self.java_helper = None
def __write_base_props(self, fds):
"""
write base properties and base properties file contents to fds
:param fds: fds
:return:
"""
base_props_file = self.settings.get("properties-file")
if base_props_file:
fds.write("# Base Properies File Start: %s\n" % base_props_file)
with open(base_props_file) as bpf:
fds.write(bpf.read())
fds.write("# Base Properies File End: %s\n\n" % base_props_file)
# base props
base_props = self.settings.get("properties")
if base_props:
fds.write("# Base Properies Start\n")
for key, val in iteritems(base_props):
fds.write("%s=%s\n" % (key, val))
fds.write("# Base Properies End\n\n")
def __write_scenario_props(self, fds, scenario):
"""
Write scenario props and scenario file props to fds
:param fds:
:param scenario: dict
:return:
"""
script_props_file = scenario.get("properties-file")
if script_props_file:
fds.write("# Script Properies File Start: %s\n" % script_props_file)
with open(script_props_file) as spf:
fds.write(spf.read())
fds.write("# Script Properies File End: %s\n\n" % script_props_file)
# scenario props
local_props = scenario.get("properties")
if local_props:
fds.write("# Scenario Properies Start\n")
for key, val in iteritems(local_props):
fds.write("%s=%s\n" % (key, val))
fds.write("# Scenario Properies End\n\n")
def __write_bzt_props(self, fds):
"""
Write bzt properties to fds
:param fds:
:return:
"""
fds.write("# BZT Properies Start\n")
fds.write("grinder.hostID=%s\n" % self.exec_id)
fds.write("grinder.script=%s\n" % self.script.replace(os.path.sep, "/"))
fds.write("grinder.logDirectory=%s\n" % self.engine.artifacts_dir.replace(os.path.sep, "/"))
load = self.get_load()
if load.iterations or load.concurrency:
fds.write("grinder.runs=%s\n" % load.iterations or 0)
if load.concurrency:
fds.write("grinder.threads=%s\n" % load.concurrency)
if load.duration:
fds.write("grinder.duration=%s\n" % int(load.duration * 1000))
fds.write("# taurus load values in case you need them\n")
fds.write("taurus.concurrency=%s\n" % load.concurrency)
fds.write("taurus.throughput=%s\n" % load.throughput)
fds.write("taurus.ramp_up=%s\n" % load.ramp_up)
fds.write("taurus.steps=%s\n" % load.steps)
fds.write("taurus.hold_for=%s\n" % load.hold)
fds.write("taurus.iterations=%s\n" % load.iterations)
fds.write("# BZT Properies End\n")
def prepare(self):
self.stdout = open(self.engine.create_artifact("grinder", ".out"), "w")
self.stderr = open(self.engine.create_artifact("grinder", ".err"), "w")
self.install_required_tools()
scenario = self.get_scenario()
self.exec_id = self.label
self.script = self.get_script_path()
if not self.script:
if "requests" in scenario:
self.script = self.__scenario_from_requests()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Grinder tool (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
self.properties_file = self.engine.create_artifact("grinder", ".properties")
with open(self.properties_file, 'w') as fds:
self.__write_base_props(fds)
self.__write_scenario_props(fds, scenario)
self.__write_bzt_props(fds)
self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + "-kpi.log")
self.reader = DataLogReader(self.kpi_file, self.log)
self.reader.report_by_url = self.settings.get("report-by-url", False)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
# add logback configurations used by worker processes (logback-worker.xml)
self.env.add_path({"CLASSPATH": RESOURCES_DIR}, finish=True)
self.env.add_path({"CLASSPATH": self.java_helper.tool_path}, finish=True)
self.env.add_path({"CLASSPATH": self.settings.get("path", None)}, finish=True)
self.cmd_line = ["java", "net.grinder.Grinder", self.properties_file]
def startup(self):
"""
Should start the tool as fast as possible.
"""
self.env.set({"T_GRINDER_PREFIX": self.exec_id})
self.process = self.execute(self.cmd_line)
def check(self):
"""
Checks if tool is still running. Also checks if resulting logs contains
any data and throws exception otherwise.
:return: bool
:raise TaurusToolError:
"""
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode,
self.get_error_diagnostics())
return True
return False
def shutdown(self):
"""
If tool is still running - let's stop it.
"""
shutdown_process(self.process, self.log)
if self.start_time:
self.end_time = time.time()
self.log.debug("Grinder worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
"""
Collect data file artifact
"""
if self.kpi_file:
self.engine.existing_artifact(self.kpi_file)
super(GrinderExecutor, self).post_process()
def __scenario_from_requests(self):
"""
Generate grinder scenario from requests
:return: script
"""
script = self.engine.create_artifact("grinder_requests", ".py")
builder = GrinderScriptBuilder(self.get_scenario(), self.log)
builder.label = self.label
builder.build_source_code()
builder.save(script)
return script
def install_required_tools(self):
grinder = self._get_tool(Grinder, config=self.settings)
self.settings["path"] = grinder.tool_path
self.java_helper = self._get_tool(TaurusJavaHelper)
required_tools = [self._get_tool(TclLibrary),
self._get_tool(JavaVM),
self.java_helper,
grinder]
for tool in required_tools:
if not tool.check_if_installed():
tool.install()
def get_widget(self):
if not self.widget:
if self.script is not None:
label = "Grinder: %s" % os.path.basename(self.script)
else:
label = None
self.widget = ExecutorWidget(self, label)
if self.get_load().ramp_up:
self.widget.duration += self.get_load().ramp_up # because we have ramp-down equal to rampup
return self.widget
def resource_files(self):
resource_files = []
script_file_path = self.get_script_path()
if script_file_path:
resource_files.append(script_file_path)
prop_file = self.get_scenario().get("properties-file")
if prop_file:
resource_files.append(prop_file)
return resource_files
def get_error_diagnostics(self):
diagnostics = []
if self.stdout is not None:
with open(self.stdout.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Grinder STDOUT:\n" + contents)
if self.stderr is not None:
with open(self.stderr.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Grinder STDOUT:\n" + contents)
return diagnostics
class DataLogReader(ResultsReader):
""" Class to read KPI from data log """
DELIMITER = ","
DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")
def __init__(self, filename, parent_logger):
super(DataLogReader, self).__init__()
self.report_by_url = False
self.log = parent_logger.getChild(self.__class__.__name__)
self.file = FileReader(filename=filename, parent_logger=self.log)
self.idx = {}
self.partial_buffer = ""
self.start_time = 0
self.end_time = 0
self.concurrency = 0
self.test_names = {}
self.known_threads = set()
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:param last_pass:
"""
self.log.debug("Reading grinder results...")
self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))
lnum = None
start = time.time()
for lnum, line in enumerate(self.lines):
if not self.idx:
if not line.startswith('data.'):
self.__split(line) # to capture early test name records
continue
line = line[line.find(' '):]
header_list = line.strip().split(self.DELIMITER)
for _ix, field in enumerate(header_list):
self.idx[field.strip()] = _ix
data_fields, worker_id = self.__split(line)
if not data_fields:
self.log.debug("Skipping line: %s", line.strip())
continue
yield self.parse_line(data_fields, worker_id, lnum)
if lnum is not None:
duration = time.time() - start
if duration < 0.001:
duration = 0.001
self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration)
def parse_line(self, data_fields, worker_id, lnum):
worker_id = worker_id.split('.')[1]
t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0)
r_time = int(data_fields[self.idx["Test time"]]) / 1000.0
latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0
r_code = data_fields[self.idx["HTTP response code"]].strip()
con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0
con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0
bytes_count = int(data_fields[self.idx["HTTP response length"]].strip())
test_id = data_fields[self.idx["Test"]].strip()
thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip()
if thread_id not in self.known_threads:
self.known_threads.add(thread_id)
self.concurrency += 1
url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)
if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]):
if not error_msg:
if r_code != '0':
error_msg = "HTTP %s" % r_code
else:
error_msg = "Java exception calling TestRunner"
else:
error_msg = None # suppress errors
if self.report_by_url:
label = url
elif test_id in self.test_names:
label = self.test_names[test_id]
else:
label = "Test #%s" % test_id
source_id = '' # maybe use worker_id somehow?
return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count
def __split(self, line):
if not line.endswith("\n"):
self.partial_buffer += line
return None, None
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
line = line.strip()
if not line.startswith('data.'):
line_parts = line.split(' ')
if len(line_parts) > 1:
if line_parts[1] == 'starting,':
# self.concurrency += 1
pass
elif line_parts[1] == 'finished':
if self.concurrency > 0:
self.concurrency -= 1
elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}:
test_id = line_parts[5][:-1]
test_name = ' '.join(line_parts[6:])
self.test_names[test_id] = test_name
self.log.debug("Recognized test id %s => %s", test_id, test_name)
return None, None
worker_id = line[:line.find(' ')]
line = line[line.find(' '):]
data_fields = line.split(self.DELIMITER)
if not data_fields[1].strip().isdigit():
return None, None
if len(data_fields) < max(self.idx.values()):
return None, None
return data_fields, worker_id
def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count):
url = ''
error_msg = None
for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. TODO: parameterize?
line = self.lines[lineNo].strip()
matched = self.DETAILS_REGEX.match(line)
if not matched:
continue
if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5):
return matched.group(2), matched.group(4)
return url, error_msg
class Grinder(RequiredTool): # todo: take it from maven and convert to JarTool(?)
VERSION = "3.11"
LOCAL_PATH = "~/.bzt/grinder-taurus/lib/grinder.jar"
def __init__(self, config=None, **kwargs):
settings = config or {}
grinder_path = settings.get("path", self.LOCAL_PATH)
grinder_path = get_full_path(grinder_path)
download_link = settings.get("download-link", "")
super(Grinder, self).__init__(tool_path=grinder_path, download_link=download_link, **kwargs)
self.version = self.VERSION
self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version)
def check_if_installed(self):
self.log.debug("Trying %s: %s", self.tool_name, self.tool_path)
try:
out, err = self.call(["java", "-classpath", self.tool_path, "net.grinder.Grinder"])
if err:
out += err
self.log.debug("%s stdout: %s", self.tool_name, out)
return True
except CALL_PROBLEMS as exc:
self.log.warning("%s check failed: %s", self.tool_name, exc)
return False
def install(self):
dest = get_full_path(self.tool_path, step_up=2)
self.log.info("Will install %s into %s", self.tool_name, dest)
grinder_dist = self._download(use_link=bool(self.download_link))
self.log.info("Unzipping %s", grinder_dist)
unzip(grinder_dist, dest, 'grinder-' + self.version)
os.remove(grinder_dist)
self.log.info("Installed grinder successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
class GrinderMirrorsManager(MirrorsManager):
MIRRORS_SOURCE = "https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder" \
"%203/{version}/grinder-{version}-binary.zip&dialog=true"
DOWNLOAD_LINK = "https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \
"/grinder-{version}-binary.zip?r=&ts=" + str(int(time.time())) + "&use_mirror=autoselect"
def __init__(self, http_client, parent_logger, grinder_version):
self.grinder_version = grinder_version
base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version)
super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
base_link = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}" \
"-binary.zip/download?use_mirror={mirror}"
li_search_pattern = re.compile(r'<li id=".*?">')
li_elements = li_search_pattern.findall(self.page_source)
if li_elements:
links = [base_link.format(version=self.grinder_version, mirror=link.strip('<li id="').strip('">')) for
link in li_elements]
default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
class GrinderScriptBuilder(PythonGenerator):
IMPORTS = """
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities
from HTTPClient import NVPair
"""
def __init__(self, scenario, parent_logger):
super(GrinderScriptBuilder, self).__init__(scenario, parent_logger)
self.label = "BZT Requests"
def build_source_code(self):
self.log.debug("Generating Python script for Grinder")
self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
self.root.append(self.add_imports())
self.root.append(self.gen_new_line())
default_address = self.scenario.get("default-address")
url_arg = "url=%r" % default_address if default_address else ""
self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0))
self.root.append(self.gen_statement('test.record(request)', indent=0))
self.root.append(self.gen_new_line())
self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))
headers = self.scenario.get_headers()
if not self.scenario.get("keepalive", True):
headers['Connection'] = 'close'
if headers:
self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
for header, value in iteritems(headers):
self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
self.root.append(self.gen_statement("])", indent=0))
global_timeout = dehumanize_time(self.scenario.get("timeout", None))
if global_timeout:
self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))
cookie_flag = int(self.scenario.get("store-cookie", True))
self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))
self.root.append(self.gen_new_line())
self.root.append(self.gen_runner_class())
@staticmethod
def __list_to_nvpair_list(items):
return "[" + ",".join("NVPair(%r, %r)" % (header, value) for header, value in items) + "]"
def gen_runner_class(self):
runner_classdef = self.gen_class_definition("TestRunner", ["object"])
sleep_method = self.gen_method_definition("rampUpSleeper", ["self"])
sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return"))
sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')"))
sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)"))
sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)"))
sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)"))
sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)"))
sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')"))
sleep_method.append(self.gen_new_line())
runner_classdef.append(sleep_method)
main_method = self.gen_method_definition("__call__", ["self"])
main_method.append(self.gen_statement("self.rampUpSleeper()"))
for req in self.scenario.get_requests():
if not isinstance(req, HTTPRequest):
msg = "Grinder script generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
method = req.method.upper()
url = req.url
local_headers = req.headers
params = "[]"
headers = self.__list_to_nvpair_list(iteritems(local_headers))
main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers)))
think_time = dehumanize_time(req.priority_option('think-time'))
if think_time:
main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000)))
runner_classdef.append(main_method)
return runner_classdef
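# Usage sketch (assumption, not taken from this project's docs): DataLogReader is
# fed the Grinder data log produced by a run and polled for KPI tuples. The log
# file name and the direct call to the module-private _read() generator below are
# illustrative placeholders only:
#
#   import logging
#   reader = DataLogReader("grinder-bzt-kpi.log", logging.getLogger(''))
#   for sample in reader._read(last_pass=True):
#       print(sample)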
| 40.82662 | 119 | 0.618823 | 22,046 | 0.945693 | 1,254 | 0.053792 | 150 | 0.006434 | 0 | 0 | 5,650 | 0.242364 |
b972e358701b6b26d8d3c931dfecc57580620c15 | 467 | py | Python | test/Fortran/fixture/myfortran_flags.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 1,403 | 2017-11-23T14:24:01.000Z | 2022-03-30T20:59:39.000Z | test/Fortran/fixture/myfortran_flags.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 3,708 | 2017-11-27T13:47:12.000Z | 2022-03-29T17:21:17.000Z | test/Fortran/fixture/myfortran_flags.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 281 | 2017-12-01T23:48:38.000Z | 2022-03-31T15:25:44.000Z | import getopt
import sys
comment = ('#' + sys.argv[1]).encode()
opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')
optstring = ''
length = len(comment)
for opt, arg in opts:
if opt == '-o': out = arg
elif opt not in ('-f', '-K'): optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write((optstring + "\n").encode())
for l in infile.readlines():
if l[:length] != comment:
outfile.write(l)
sys.exit(0)
| 27.470588 | 67 | 0.601713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.087794 |
b9736fc25869ac44481082e255dc93e0f52aa441 | 9,015 | py | Python | zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 104c2693d2cc61520657131da769f5d59d2df8e9 | [
"MIT"
] | 30 | 2021-12-25T15:39:42.000Z | 2022-02-25T04:53:44.000Z | zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 104c2693d2cc61520657131da769f5d59d2df8e9 | [
"MIT"
] | 11 | 2022-01-02T22:10:07.000Z | 2022-02-02T00:56:33.000Z | zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 104c2693d2cc61520657131da769f5d59d2df8e9 | [
"MIT"
] | 2 | 2022-01-27T13:22:46.000Z | 2022-01-30T05:01:59.000Z | import io
import os
import base64
from pathlib import Path
from nbconvert import filters
from pygments.formatters.latex import LatexFormatter
from zen_knit import formattor
from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData
from zen_knit.formattor.html_formatter import HTMLFormatter
mime_extensions = {"image/png" : "png",
"image/jpg" : "jpg"}
class BaseOrganizer:
def __init__(self, executed_data: ExecutedData):
self.format_started = False
self.collected_string = ""
self.fig_folder = None
self.executed_data = executed_data
self.formatted_doc = []
self.organized_data = OrganizedData(
global_options = self.executed_data.global_options,
chunks = []
)
self._create_output_folder_name()
self._create_fig_folder()
self._organize_doc()
self._create_output_file_name()
def _create_output_file_name(self):
global_options = self.organized_data.global_options
global_options.output.file_name = global_options.input.file_name.split(".")[0] + "."+ global_options.output.format
def _create_output_folder_name(self):
global_options = self.organized_data.global_options
if global_options.output.dir is None:
global_options.output.dir = global_options.input.dir
def _create_fig_folder(self):
output_folder = self.organized_data.global_options.output.dir
Path(output_folder).mkdir(parents=True, exist_ok=True)
fig_folder = os.path.join(output_folder, self.organized_data.global_options.output.fig_dir)
self.fig_folder = fig_folder
Path(fig_folder).mkdir(parents=True, exist_ok=True)
def _parse_raw(self, data, output_type):
if data.get("code_text_raw") is not None:
if self._clean_up(data['code_text_raw']) is not None:
if output_type in ("code"):
t = {"type": "code", "str_data": data['code_text_raw'] }
elif output_type in ("sql"):
t = {"type": "sql", "str_data": data['code_text_raw'] }
else:
t = {"type": "markdown", "str_data": data['code_text_raw'] }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
else:
return False
def _coder_string(self, data):
list_ = ["stream", "error"]
if data["output_type"] is None:
return False
if data["output_type"] in list_:
if data["output_type"] == "stream":
if self._clean_up(data['text']) is not None:
t = {"type": "se_data", "str_data": data['text'] }
self.organized_data.chunks.append(OrganizedChunk(**t))
if data["output_type"] == "error":
t = {"type": "se_data", "str_data": data["evalue"] + filters.strip_ansi("".join(data["traceback"])) }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return False
def _raw_string(self, data):
if data["output_type"] is None:
return False
if data["output_type"] == "execute_result":
if data.get("data") is not None:
if 'matplotlib' in data["data"]["text/plain"]:
# Doing nothing here
return True
else:
if ((data["data"]["text/plain"][0] == "'") or (data["data"]["text/plain"][0] == '"')):
temp = data["data"]["text/plain"][1:-1]
else:
temp = data["data"]["text/plain"]
if "<table" in temp:
t = {"type": "html_data", "str_data":temp.encode().decode() }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
# if "BokehJS" in temp:
# t = {"type": "html_data", "str_data": "<script type='text/javascript'>" + temp.encode().decode() + "</script>" }
# self.organized_data.chunks.append(OrganizedChunk(**t))
# return True
if self._clean_up(temp) is not None:
t = {"type": "e_data", "str_data":temp }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return True
return False
def _raw_plots(self, data, chunk_option:ChunkOption):
if data["output_type"] is None:
return False
if data["output_type"] == "display_data":
plot_infos = self._save_plots(data, chunk_option)
t = {"type": "plot", "complex_data":{"plots": plot_infos, "options": chunk_option }}
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return False
def _save_plots(self, data, chunk_option:ChunkOption):
figs = []
i = 1
for m in mime_extensions:
if m in data["data"]:
fig_full_path, fig_relative_path = self._build_file(mime_extensions[m], i, chunk_option.fig_caption, chunk_option.name)
figs.append(fig_relative_path)
bfig = base64.b64decode(data["data"][m])
with open(fig_full_path, "wb") as f:
f.write(bfig)
i += 1
return figs
def _build_file(self, extension, index, fig_caption= None, name =None):
fig_name = ""
if fig_caption is not None:
fig_name = fig_name + "_" + fig_caption
if name is not None:
fig_name = fig_name + "_" + name
fig_name = fig_name + "_" + str(index)
fig_name = fig_name + "." + extension
return os.path.join(self.fig_folder, fig_name), os.path.join(self.fig_folder, fig_name)
def _interactive_plots(self, data):
if data["output_type"] is None:
return False
if data["output_type"] == "display_data":
if "text/html" in data["data"]:
print(self.executed_data.global_options.output.format)
if self.executed_data.global_options.output.format != "html":
raise Exception("output format is not HTML")
else:
t = {"type": "html_data", "str_data":data["data"]["text/html"].encode().decode() }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return False
def _organize_doc(self):
for index, chunk in enumerate(self.executed_data.chunks):
chunk_option = chunk.chunk.options
if chunk_option.name:
print(f"organizing {chunk_option.name}")
else:
print(f"organizing index {index}")
results = chunk.results
for result in results:
data = result.data
present = self._parse_raw(data, result.output_type)
if present:
continue
present = self._coder_string(data)
if present:
continue
present = self._raw_string(data)
if present:
continue
present = self._interactive_plots(data)
if present:
continue
present = self._raw_plots(data, chunk_option)
if present:
continue
print("not supported format", data)
t = []
c: OrganizedChunk
for c in self.organized_data.chunks:
            last_chunk: OrganizedChunk
            if len(t) > 0:
                last_chunk = t[-1]
            else:
                last_chunk = None
            if last_chunk is None:
                t.append(c)
            else:
                if (c.type == last_chunk.type) & (c.type != "plot"):
                    last_chunk.str_data = last_chunk.str_data + "\n" + c.str_data
else:
t.append(c)
self.organized_data.chunks = t
@staticmethod
def _clean_up(doc):
d = doc.replace(" ", "").replace("\n", "")
if len(d) != 0:
return doc
else:
return None
# markdown_file = self.executed_data.global_options.input_file_name.split(".")[0] + ".md"
# markdown_file = os.path.join(self.executed_data.global_options.output_file_dir , markdown_file)
# with open(markdown_file, "w") as f:
# text = "\n".join(self.formatted_doc)
# f.write(text)
| 37.5625 | 139 | 0.533888 | 8,590 | 0.952856 | 0 | 0 | 173 | 0.01919 | 0 | 0 | 1,463 | 0.162285 |
b974558759b358f82c2d72d79bab9c7dc3e35a76 | 12,467 | py | Python | qibullet/robot_virtual.py | mcaniot/qibullet | 9c5e1b319a18dd289263eb82f9d7303429bcbe21 | [
"Apache-2.0"
] | null | null | null | qibullet/robot_virtual.py | mcaniot/qibullet | 9c5e1b319a18dd289263eb82f9d7303429bcbe21 | [
"Apache-2.0"
] | null | null | null | qibullet/robot_virtual.py | mcaniot/qibullet | 9c5e1b319a18dd289263eb82f9d7303429bcbe21 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import sys
import pybullet
from qibullet.camera import *
from qibullet.link import Link
from qibullet.joint import Joint
IS_VERSION_PYTHON_3 = sys.version_info[0] >= 3
class RobotVirtual:
"""
Mother class representing a virtual robot
"""
def __init__(self, description_file):
"""
Constructor
Parameters:
description_file - The file giving the description of the virtual
robot. For now, only URDF is handled
"""
self.description_file = description_file
self.physics_client = 0
self.active_camera = None
self.camera_dict = dict()
self.joint_dict = dict()
self.link_dict = dict()
def loadRobot(self, translation, quaternion, physicsClientId=0):
"""
Loads the robot into a simulation, loads the joints and the links
descriptions. The joints are set to 0 rad.
Parameters:
translation - List containing 3 elements, the translation [x, y, z]
of the robot in the WORLD frame
quaternion - List containing 4 elements, the quaternion
[x, y, z, q] of the robot in the WORLD frame
physicsClientId - The id of the simulated instance in which the
robot is supposed to be loaded
Returns:
boolean - True if the method ran correctly, False otherwise
"""
try:
self.physics_client = physicsClientId
self.robot_model = pybullet.loadURDF(
self.description_file,
translation,
quaternion,
useFixedBase=False,
globalScaling=1.0,
physicsClientId=self.physics_client,
flags=pybullet.URDF_USE_SELF_COLLISION |
pybullet.URDF_USE_MATERIAL_COLORS_FROM_MTL)
except pybullet.error as e:
raise pybullet.error("Cannot load robot model: " + str(e))
for i in range(pybullet.getNumJoints(
self.robot_model,
physicsClientId=self.physics_client)):
if IS_VERSION_PYTHON_3:
# PYTHON 3 version needs a conversion bytes to str
joint_info = pybullet.getJointInfo(
self.robot_model,
i,
physicsClientId=self.physics_client)
self.link_dict[joint_info[12].decode('utf-8')] =\
Link(joint_info)
if joint_info[2] == pybullet.JOINT_PRISMATIC or\
joint_info[2] == pybullet.JOINT_REVOLUTE:
self.joint_dict[joint_info[1].decode('utf-8')] =\
Joint(joint_info)
else:
# PYTHON 2 Version
joint_info = pybullet.getJointInfo(
self.robot_model,
i,
physicsClientId=self.physics_client)
self.link_dict[joint_info[12]] = Link(joint_info)
if joint_info[2] == pybullet.JOINT_PRISMATIC or\
joint_info[2] == pybullet.JOINT_REVOLUTE:
self.joint_dict[joint_info[1]] = Joint(joint_info)
def getRobotModel(self):
"""
Returns the pybullet model to which the module is associated.
Returns:
robot_model - The pybullet model of the robot
"""
return self.robot_model
def getPhysicsClientId(self):
"""
Returns the id of the simulated instance in which the module is loaded.
Returns:
physics_client - The id of the simulation in which the robot
(possessing the module) is spawned
"""
return self.physics_client
def setAngles(self, joint_names, joint_values, percentage_speeds):
"""
Set angles on the robot's joints. Tests have to be performed by the
child class to guarantee the validity of the input parameters.
Parameters:
joint_names - List of string containing the name of the joints
to be controlled
joint_values - List of values corresponding to the angles in
radians to be applied
percentage_speeds - Percentages of the max speed to be used for
each joint, has to be strictly superior to 0 and inferior or equal
to 1
"""
try:
assert len(joint_names) ==\
len(joint_values) ==\
len(percentage_speeds)
assert all(
speed >= 0.0 and speed <= 1.0 for speed in percentage_speeds)
except AssertionError:
raise pybullet.error("Error in the setAngles parameters")
for joint_name, joint_value, percentage_speed in zip(
joint_names,
joint_values,
percentage_speeds):
joint_speed =\
self.joint_dict[joint_name].getMaxVelocity() *\
percentage_speed
pybullet.setJointMotorControl2(
self.robot_model,
self.joint_dict[joint_name].getIndex(),
pybullet.POSITION_CONTROL,
targetPosition=joint_value,
maxVelocity=joint_speed,
force=self.joint_dict[joint_name].getMaxEffort(),
physicsClientId=self.physics_client)
def getAnglesPosition(self, joint_names):
"""
        Gets the position of the robot's joints in radians. If one of the joints
doesn't exist, the method will raise a KeyError.
Parameters:
joint_names - List of string containing the names of the joints
Returns:
joint_positions - List of floats containing the joint's positions
"""
joint_positions = list()
for joint_name in joint_names:
joint_positions.append(pybullet.getJointState(
self.robot_model,
self.joint_dict[joint_name].getIndex(),
physicsClientId=self.physics_client)[0])
return joint_positions
def getAnglesVelocity(self, joint_names):
"""
        Gets the velocity of the robot's joints in rad/s. If one of the joints
doesn't exist, the method will raise a KeyError.
Parameters:
joint_names - List of string containing the names of the joints
Returns:
joint_velocities - List of floats containing the joint's velocities
"""
joint_velocities = list()
for joint_name in joint_names:
joint_velocities.append(pybullet.getJointState(
self.robot_model,
self.joint_dict[joint_name].getIndex(),
physicsClientId=self.physics_client)[1])
return joint_velocities
def subscribeCamera(self, camera_id, resolution=Camera.K_QVGA):
"""
Subscribe to the camera holding the camera id. WARNING: at the moment,
only one camera can be subscribed.
Parameters:
camera_id - The id of the camera to be subscribed
resolution - CameraResolution object, the resolution of the camera
"""
try:
self.active_camera = self.camera_dict[camera_id]
self.active_camera.subscribe(resolution=resolution)
except KeyError:
print("This camera does not exist, use a valid camera id")
def unsubscribeCamera(self, camera_id):
"""
Unsubscribe from a camera, the one holding the camera id.
Parameters:
camera_id - The id of the camera to be unsubscribed
"""
try:
# If no active camera is found, nothing is unsubscribed
assert self.active_camera is not None
if self.active_camera.getCameraId() == camera_id:
self.active_camera.unsubscribe()
self.active_camera = None
except KeyError:
print("This camera does not exist, use a valid camera id")
except AssertionError:
pass
def getCameraFrame(self):
"""
Returns a camera frame. Be advised that the subscribeCamera method
needs to be called beforehand, otherwise a pybullet error will be
raised.
Returns:
frame - The current camera frame as a formatted numpy array,
directly exploitable from OpenCV
"""
try:
assert self.active_camera is not None
return self.active_camera.getFrame()
except AssertionError:
raise pybullet.error("No active camera, cannot retrieve any frame")
def getCameraResolution(self):
"""
Returns the resolution of the active camera. Be advised that the
subscribeCamera method needs to be called beforehand, otherwise a
pybullet error will be raised.
Returns:
resolution - a CameraResolution object describing the resolution of
the active camera
"""
try:
assert self.active_camera is not None
return self.active_camera.getResolution()
        except AssertionError:
raise pybullet.error("No active camera, resolution unavailable")
def getCameraLink(self):
"""
Returns the link of the active camera. Be advised that the
subscribeCamera method needs to be called beforehand, otherwise a
pybullet error will be raised.
Returns:
resolution - a Link object describing the link to which the active
camera is attached
"""
try:
assert self.active_camera is not None
return self.active_camera.getCameraLink()
        except AssertionError:
raise pybullet.error("No active camera, cannot retrieve any link")
def getActiveCamera(self):
"""
Returns the active camera of the robot.
Returns:
active_camera - Camera (CameraRgb or CameraDepth) object, the
active camera of the robot. If there is no active camera, a None is
returned
"""
return self.active_camera
def getPosition(self):
"""
Gets the position of the robot's base in the world frame.
Returns:
x - The position of the robot's base on the x axis, in meters
            y - The position of the robot's base on the y axis, in meters
            theta - The rotation of the robot's base around the z axis, in radians
"""
position, quaternions = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
theta = pybullet.getEulerFromQuaternion(quaternions)[2]
return position[0], position[1], theta
def isSelfColliding(self, link_names):
"""
Specifies if a link is colliding with the rest of the virtual robot.
Parameters:
link_names - String or list of string containing the names of the
links to be checked for self collision. WARNING: only the links
with corresponding meshes should be used, otherwise the link cannot
self collide
Returns:
self_colliding - Boolean, if True at least one of the links is self
colliding
"""
try:
if type(link_names) is str:
assert link_names in self.link_dict.keys()
names = [link_names]
else:
assert set(link_names).issubset(self.link_dict.keys())
names = list(link_names)
for name in names:
contact_tuple = pybullet.getContactPoints(
bodyA=self.robot_model,
bodyB=self.robot_model,
linkIndexA=self.link_dict[name].getIndex(),
physicsClientId=self.physics_client)
contact_tuple += pybullet.getContactPoints(
bodyA=self.robot_model,
bodyB=self.robot_model,
linkIndexB=self.link_dict[name].getIndex(),
physicsClientId=self.physics_client)
if len(contact_tuple) != 0:
return True
return False
except AssertionError:
raise pybullet.error(
"Unauthorized link checking for self collisions")
| 35.31728 | 79 | 0.593006 | 12,256 | 0.983075 | 0 | 0 | 0 | 0 | 0 | 0 | 5,530 | 0.443571 |
b974d5d1bd35654f50415a8f7c66f3fb9a0316ab | 704 | py | Python | tests/test_formatter.py | hbraux/kafkacli | 5f7ed23150932b66b484fb43dd6210b6c0968776 | [
"MIT"
] | null | null | null | tests/test_formatter.py | hbraux/kafkacli | 5f7ed23150932b66b484fb43dd6210b6c0968776 | [
"MIT"
] | null | null | null | tests/test_formatter.py | hbraux/kafkacli | 5f7ed23150932b66b484fb43dd6210b6c0968776 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import json
from kafkacli.formatter import Formatter
sampleJson = json.loads('{"a":"s", "b":1}')
def test_print_default(capsys):
Formatter().print(sampleJson)
captured = capsys.readouterr()
assert captured.out == '{"a": "s", "b": 1}\n'
def test_print_idents(capsys):
Formatter(indents=True).print(sampleJson)
captured = capsys.readouterr()
assert captured.out == '{\n "a": "s",\n "b": 1\n}\n'
def test_print_colors(capsys):
Formatter(colors=True).print(sampleJson)
captured = capsys.readouterr()
assert captured.out == \
'{"a": \x1b[34m"s"\x1b[39m, "b": \x1b[31m1\x1b[39m}\n'
| 24.275862 | 62 | 0.640625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.245739 |
b9750e636d7a3d49a65558af431533fc2e745edb | 187 | py | Python | src/jobs/forms.py | arc198/DJANGO-JOB-SITE | d9547c4ee85751677ba6458380b609973c3b4a8d | [
"MIT"
] | 20 | 2018-05-04T18:42:35.000Z | 2021-03-18T07:15:12.000Z | src/jobs/forms.py | fleepgeek/django-jobsite | d9547c4ee85751677ba6458380b609973c3b4a8d | [
"MIT"
] | 5 | 2020-02-11T22:22:33.000Z | 2021-06-10T20:18:05.000Z | src/jobs/forms.py | arc198/DJANGO-JOB-SITE | d9547c4ee85751677ba6458380b609973c3b4a8d | [
"MIT"
] | 8 | 2018-05-04T19:03:23.000Z | 2020-09-23T00:24:46.000Z | from django import forms
from .models import Application
class ApplicationForm(forms.ModelForm):
class Meta:
model = Application
fields = ('resume', 'cover_letter',) | 23.375 | 44 | 0.700535 | 128 | 0.684492 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.117647 |
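# Usage sketch (assumption): a view elsewhere in the project would typically bind
# this form to the submitted data and files, then save the Application instance.
#
#   form = ApplicationForm(request.POST, request.FILES)
#   if form.is_valid():
#       application = form.save()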
b975e6fb7fb3fa8849afb4e4ce41618c2ce94c1b | 451 | py | Python | src/test/tests/unit/protocol.py | ylee88/visit | 8e0920996d84fef70a7014b0d770360918d849d5 | [
"BSD-3-Clause"
] | 1 | 2022-01-27T23:52:04.000Z | 2022-01-27T23:52:04.000Z | src/test/tests/unit/protocol.py | ylee88/visit | 8e0920996d84fef70a7014b0d770360918d849d5 | [
"BSD-3-Clause"
] | null | null | null | src/test/tests/unit/protocol.py | ylee88/visit | 8e0920996d84fef70a7014b0d770360918d849d5 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
#  Test Case:  protocol.py
#
# Tests: vistprotocol unit test
#
# Mark C. Miller, Tue Jan 11 10:19:23 PST 2011
# ----------------------------------------------------------------------------
tapp = visit_bin_path("visitprotocol")
res = sexe(tapp,ret_output=True)
if res["return_code"] == 0:
excode = 111
else:
excode = 113
Exit(excode)
| 26.529412 | 78 | 0.432373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.702882 |
b97645cb1bc48b7d30c6b37e139952912087b791 | 3,348 | py | Python | pyMazeBacktrack.py | Dozed12/pyMazeBacktrack | aaa2a902fdca17dca6e2ee00e672b6bb38da5639 | [
"MIT"
] | 2 | 2019-02-22T10:35:25.000Z | 2020-08-11T01:25:12.000Z | pyMazeBacktrack.py | Dozed12/pyMazeBacktrack | aaa2a902fdca17dca6e2ee00e672b6bb38da5639 | [
"MIT"
] | null | null | null | pyMazeBacktrack.py | Dozed12/pyMazeBacktrack | aaa2a902fdca17dca6e2ee00e672b6bb38da5639 | [
"MIT"
] | null | null | null | import libtcodpy as libtcod
from random import randint
nSquares = 30
nTiles = nSquares * 2 + 1
SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles
libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)
def CheckDir(x,y,size,direction,table):
if direction == 1:
if y - 2 <= 0:
return 0
if table[x][y-2] == white:
return 0
elif direction == 2:
if x + 2 >= size:
return 0
if table[x+2][y] == white:
return 0
elif direction == 3:
if y + 2 >= size:
return 0
if table[x][y+2] == white:
return 0
elif direction == 4:
if x - 2 <= 0:
return 0
if table[x-2][y] == white:
return 0
return 1
def Possible(x,y,table,size):
if x+2 < size:
if table[x+2][y] == black:
return 1
if x-2 > 0:
if table[x-2][y] == black:
return 1
if y+2 < size:
if table[x][y+2] == black:
return 1
if y-2 > 0:
if table[x][y-2] == black:
return 1
return 0
black = libtcod.black
white = libtcod.white
Table = [[0 for i in range(nTiles)]for i in range(nTiles)]
for x in range(nTiles):
for y in range(nTiles):
Table[x][y] = black
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
Memory = []
CurrX = 1
CurrY = 1
Table[CurrX][CurrY] = white
end = 0
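# Depth-first maze generation with backtracking: repeatedly carve a random
# unvisited neighbour two tiles away (knocking out the wall tile in between),
# remembering each move in Memory; when no unvisited neighbour remains, walk
# back along Memory until a cell with unvisited neighbours is found, and stop
# once the walk returns to the start cell (1, 1).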
while end == 0:
while Possible(CurrX,CurrY,Table,nTiles):
Dir = randint(1,4)
while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0:
Dir = randint(1,4)
if Dir == 1:
Table[CurrX][CurrY - 1] = white
CurrY -= 2
Table[CurrX][CurrY] = white
elif Dir == 2:
Table[CurrX + 1][CurrY] = white
CurrX += 2
Table[CurrX][CurrY] = white
elif Dir == 3:
Table[CurrX][CurrY + 1] = white
CurrY += 2
Table[CurrX][CurrY] = white
elif Dir == 4:
Table[CurrX - 1][CurrY] = white
CurrX -= 2
Table[CurrX][CurrY] = white
Memory.append(Dir)
#print
for x in range(nTiles):
for y in range(nTiles):
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
while Possible(CurrX,CurrY,Table,nTiles) == 0:
MemorySize = len(Memory)
Dir = Memory[MemorySize-1]
if Dir == 1:
CurrY += 2
elif Dir == 2:
CurrX -= 2
elif Dir == 3:
CurrY -= 2
elif Dir == 4:
CurrX += 2
del Memory[MemorySize-1]
if CurrX == 1 and CurrY == 1:
end = 1
break
#print
for x in range(nTiles):
for y in range(nTiles):
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
libtcod.console_wait_for_keypress(True)
| 20.168675 | 106 | 0.496416 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.014337 |
b978586a0e39802db346feaf3a0aa1c91c336f05 | 3,011 | py | Python | source/tests/test_resources.py | aws-solutions/maintaining-personalized-experiences-with-machine-learning | 3f6f1b0069df4828eae9b0835b717500189e4f71 | [
"Apache-2.0"
] | 6 | 2021-09-23T16:33:24.000Z | 2022-03-31T11:45:13.000Z | source/tests/test_resources.py | aws-solutions/maintaining-personalized-experiences-with-machine-learning | 3f6f1b0069df4828eae9b0835b717500189e4f71 | [
"Apache-2.0"
] | 4 | 2021-09-24T21:34:14.000Z | 2022-01-27T22:11:08.000Z | source/tests/test_resources.py | aws-solutions/maintaining-personalized-experiences-with-machine-learning | 3f6f1b0069df4828eae9b0835b717500189e4f71 | [
"Apache-2.0"
] | 9 | 2021-09-23T23:24:46.000Z | 2022-02-12T04:53:16.000Z | # ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
import pytest
from shared.resource import (
DatasetGroup,
Schema,
Dataset,
DatasetImportJob,
Solution,
SolutionVersion,
Campaign,
EventTracker,
BatchSegmentJob,
BatchInferenceJob,
)
@pytest.mark.parametrize(
"klass,camel,dash,snake",
[
(DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
(Schema, "schema", "schema", "schema"),
(Dataset, "dataset", "dataset", "dataset"),
(
DatasetImportJob,
"datasetImportJob",
"dataset-import-job",
"dataset_import_job",
),
(Solution, "solution", "solution", "solution"),
(SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
(Campaign, "campaign", "campaign", "campaign"),
(EventTracker, "eventTracker", "event-tracker", "event_tracker"),
(
BatchInferenceJob,
"batchInferenceJob",
"batch-inference-job",
"batch_inference_job",
),
(BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
],
ids=[
"DatasetGroup",
"Schema",
"Dataset",
"DatasetImportJob",
"Solution",
"SolutionVersion",
"Campaign",
"EventTracker",
"BatchInferenceJob",
"BatchSegmentJob,",
],
)
def test_resource_naming(klass, camel, dash, snake):
assert klass().name.camel == camel
assert klass().name.dash == dash
assert klass().name.snake == snake
| 42.408451 | 120 | 0.454334 | 0 | 0 | 0 | 0 | 1,332 | 0.442378 | 0 | 0 | 2,027 | 0.673198 |
b9787b11fbcd5779df09a2f0f27e44e75ad576ac | 1,870 | py | Python | app_venv/Lib/site-packages/phonenumbers/data/region_AG.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | [
"MIT"
] | null | null | null | app_venv/Lib/site-packages/phonenumbers/data/region_AG.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | [
"MIT"
] | null | null | null | app_venv/Lib/site-packages/phonenumbers/data/region_AG.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | [
"MIT"
] | null | null | null | """Auto-generated file, do not edit by hand. AG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AG = PhoneMetadata(id='AG', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='(?:268|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='268(?:4(?:6[0-38]|84)|56[0-2])\\d{4}', example_number='2684601234', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='268(?:464|7(?:1[3-9]|[28]\\d|3[0246]|64|7[0-689]))\\d{4}', example_number='2684641234', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='52(?:355[0-46-9]|4(?:5(?:2[024-9]|5[0-46-9])|60[1-9]|9(?:2[0-5]|49)))\\d{4}|52(?:3(?:[2-46-9][02-9]|5[02-46-9])|4(?:[2-478][02-9]|5[034]|6[2-9]|9[05-9])|7[2-4]\\d)\\d{5}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[1256]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
voip=PhoneNumberDesc(national_number_pattern='26848[01]\\d{4}', example_number='2684801234', possible_length=(10,), possible_length_local_only=(7,)),
pager=PhoneNumberDesc(national_number_pattern='26840[69]\\d{4}', example_number='2684061234', possible_length=(10,), possible_length_local_only=(7,)),
national_prefix='1',
national_prefix_for_parsing='1|([457]\\d{6})$',
national_prefix_transform_rule='268\\1',
leading_digits='268',
mobile_number_portable_region=True)
| 103.888889 | 352 | 0.711765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 638 | 0.341176 |
b97884a1b2bbd76cce01bb9efe2744d31832af25 | 2,182 | py | Python | gradefiles-send.py | lapets/bu-gsubmit-grading | 69c40a763908be1c954dce3e5e5aab854ac379ff | [
"MIT"
] | 3 | 2016-10-03T15:29:20.000Z | 2019-06-28T17:33:06.000Z | gradefiles-send.py | lapets/bu-gsubmit-grading | 69c40a763908be1c954dce3e5e5aab854ac379ff | [
"MIT"
] | null | null | null | gradefiles-send.py | lapets/bu-gsubmit-grading | 69c40a763908be1c954dce3e5e5aab854ac379ff | [
"MIT"
] | null | null | null | #####################################################################
##
## gradefiles-send.py
##
## Script to send grade files by email to enrolled students; the
## input grade file names should correspond to the user names of
## the students.
##
##
from email.mime.text import MIMEText # For creating a message string.
from subprocess import Popen, PIPE # For sending email on linux.
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## Sending a simple email message.
##
def send(txt, courseNumber, task, sender, targets):
msg = MIMEText(txt)
msg["From"] = sender + "@bu.edu"
msg["To"] = ",".join([target + "@bu.edu" for target in targets])
msg["Cc"] = sender + "@bu.edu"
msg["Subject"] = "CS " + courseNumber + " " + task + " grade"
p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
p.communicate(bytes(msg.as_string(), 'UTF-8'))
#####################################################################
## Process the command line parameters.
##
if len(sys.argv) == 6\
and (int(sys.argv[1][0:3]) in range(100,1000))\
and sys.argv[2] in ['Fall', 'Spring']\
and int(sys.argv[3]) in range(2000,2100):
courseNumber = sys.argv[1] # Accepts course names like "591 X1."
season = sys.argv[2]
year = sys.argv[3]
task = sys.argv[4]
sender = sys.argv[5]
else:
print('\n Usage:\n\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
exit()
#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
print('No folder "data" containing grade files found. Exiting.')
exit()
#####################################################################
## Send the grade files.
##
for curdir, dirs, files in os.walk('./data/'):
for file in files:
txt = open('./data/'+file, 'r').read()
targets = file.split('.')[0].split("_")
send(txt, courseNumber, task, sender, targets)
print('Sent grade file to ' + str(targets) + '.')
#eof | 33.569231 | 112 | 0.519707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,177 | 0.539413 |
b9789c0f2981942a54633089abdf3245b58a73a3 | 1,227 | py | Python | Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py | GalAster/16 | 47560a2132fbe4dda35a35dedfd7d8e6a8acc35a | [
"Unlicense"
] | 3 | 2019-10-03T01:51:38.000Z | 2019-10-04T16:15:43.000Z | Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py | GalAster/16 | 47560a2132fbe4dda35a35dedfd7d8e6a8acc35a | [
"Unlicense"
] | null | null | null | Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py | GalAster/16 | 47560a2132fbe4dda35a35dedfd7d8e6a8acc35a | [
"Unlicense"
] | 1 | 2020-03-17T12:58:52.000Z | 2020-03-17T12:58:52.000Z | import os
import pickle
import tensorflow as tf
import wolframclient.serializers as wxf
name = 'karras2018iclr-celebahq-1024x1024'
file = open(name + '.pkl', 'rb')
sess = tf.InteractiveSession()
G, D, Gs = pickle.load(file)
saver = tf.train.Saver()
save_path = "./target/" + name + "/"
model_name = 'model'
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full)
ckpt = tf.train.get_checkpoint_state(save_path)
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
all_variables = list(reader.get_variable_to_shape_map().keys())
npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))
wxf.export(npy, name + '.wxf', target_format='wxf')
# Save as protobuf
with tf.Session() as sess:
tf.initialize_all_variables().run()
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
# output_node_names=['G_paper_1/images_out']
output_node_names=['G_paper_1/ToRGB_lod0/add']
)
    with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file:  # save the model to disk
        file.write(output_graph_def.SerializeToString())  # serialize the graph and write it out
| 34.083333 | 74 | 0.726976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.173494 |
b978dfcb152bc099b2de54896ed9a54dfbc29639 | 6,890 | py | Python | src/moveGoogle.py | Quanta-Robotics/Robot-Blueberry | 7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da | [
"MIT"
] | 25 | 2021-06-08T07:09:30.000Z | 2021-12-30T06:28:35.000Z | src/moveGoogle.py | ICT-CoU/Robot-Blueberry | d19fd1be037df9d67de64df57a87006d74cd6c43 | [
"MIT"
] | 2 | 2021-05-23T12:54:51.000Z | 2021-06-07T17:47:56.000Z | src/moveGoogle.py | ICT-CoU/Robot-Blueberry | d19fd1be037df9d67de64df57a87006d74cd6c43 | [
"MIT"
] | 14 | 2021-06-08T13:02:28.000Z | 2021-12-30T20:07:18.000Z | #!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from adafruit_servokit import ServoKit
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
GPIO.setup(Motor1[x], GPIO.OUT)
GPIO.setup(Motor2[x], GPIO.OUT)
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
hand = ServoKit(channels=16)
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
def readYaml():
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servo = yaml.load(conf, Loader=yaml.FullLoader)
return servo
def writeYaml(s=None):
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:
if s==None:
yaml.dump(servo,conf)
else:
yaml.dump(s,conf)
servo = readYaml()
if servo == None:
with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
writeYaml(servoBackUp)
servo = readYaml()
if servo == None:
print('close')
exit()
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
GPIO.setup(GpioPin[i], GPIO.OUT)
Servo = []
for i in range(0,6):
Servo.append(GPIO.PWM(GpioPin[i],50))
Servo[i].start(0)
def changeDegree(pin,newDegree,time1=0.05,update=5):
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,update):
for i in range(0,pinSize):
if Current[pin[i]]<newDegree[i]:
Current[pin[i]] += update
elif Current[pin[i]]>newDegree[i]:
Current[pin[i]] -= update
for i in range(0,pinSize):
hand.servo[pin[i]].angle = Current[pin[i]]
servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]
writeYaml()
time.sleep(time1)
def takePosition():
changeDegree([7,8],[180,0])
changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])
def changeDegreeGpio(pin,degree,update,duration):
pinSize = len(pin)
for i in range(0,pinSize):
p = pin[i]
if CurrentGpio[p]>degree[i]:
update = -update
for deg in range(CurrentGpio[p],degree[i],update):
duty = deg/18
duty+=2
Servo[p].ChangeDutyCycle(duty)
time.sleep(duration)
CurrentGpio[p]=degree[i]
writeYaml()
def Run(a, b, c, d, x):
GPIO.output(Motor1['input1'], GPIO.LOW)
GPIO.output(Motor1['input2'], GPIO.LOW)
GPIO.output(Motor2['input1'], GPIO.LOW)
GPIO.output(Motor2['input2'], GPIO.LOW)
if a==1:
GPIO.output(Motor1['input1'], GPIO.HIGH)
if b==1:
GPIO.output(Motor1['input2'], GPIO.HIGH)
if c==1:
GPIO.output(Motor2['input1'], GPIO.HIGH)
if d==1:
GPIO.output(Motor2['input2'], GPIO.HIGH)
EN2.ChangeDutyCycle(x)
EN1.ChangeDutyCycle(x)
def Stop():
Run(0,0,0,0,0)
def Start_Slow(a, b, c, d):
for i in range(0,100,20):
Run(a,b,c,d,i)
time.sleep(0.5)
def Stop_Slow(a,b,c,d):
for i in range(100,0,-20):
Run(a,b,c,d,i)
time.sleep(0.5)
def yes(times=3):
for i in range(0,times):
changeDegree([0],[30])
time.sleep(0.08)
changeDegree([0],[0])
time.sleep(0.08)
def no(times=3):
for i in range(0,times):
changeDegree([15],[70],5,0.05)
time.sleep(0.2)
changeDegree([15],[110],5,0.05)
time.sleep(0.2)
changeDegree([15],[90],5,0.05)
def move_head(times=3):
for i in range(0,times):
changeDegree([0],[20])
changeDegreeGpio([0],[80],5,0.05)
changeDegree([0],[0])
changeDegreeGpio([0],[100],5,0.05)
changeDegreeGpio([0],[90],10,0.01)
def random0():
r = random.randrange(1,10000000)%3
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
elif(r==2):
changeDegreeGpio([0],[120],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
else:
changeDegreeGpio([0],[60],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
def random1():
r = random.randrange(1,3)
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([3],[50])
changeDegree([9],[100])
changeDegree([9],[60])
changeDegree([3],[0])
elif(r==2):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([4],[120])
changeDegree([10],[140])
changeDegree([10],[180])
changeDegree([4],[170])
else:
changeDegree([3,4],[50,120])
changeDegree([9,10],[100,140])
changeDegree([9,10],[60,180])
changeDegree([3,4],[0,180])
def random2():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]
for i in range(0,15):
r = select[i%len(select)]%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def random3():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
for i in range(0,15):
r = random.randrange(1,1000000)%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
takePosition()
def randomCall(t):
changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])
pin = [5,6,7,8]
deg = [[80,50,100,70],[110,90,110,90]]
select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]
ok = [0,0,0,0]
ln = len(select)
for i in range(0,t*3):
r = select[i%16]%4
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def expression(t):
print (' i got value of t is : ',t)
if(t==0):
random0()
elif(t==1):
random1()
elif(t==2):
random2()
elif(t==3):
random3()
else:
randomCall(t)
def speakOnline(t):
expression(t)
def speakOffline(speech):
t = int(len(speech)/15)
print ('Offline t value is : ',t)
p1 = multiprocessing.Process(target=expression,args=[t])
p1.start()
say(speech)
| 25.330882 | 154 | 0.560377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.066183 |
b978fbbcd4002601ca1e2723cae4385002e671d8 | 2,063 | py | Python | src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from uuid import uuid4
from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
spoken_association_table = Table(
'spoken_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
written_association_table = Table(
'written_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
mother_tongue_association_table = Table(
'mother_tongue_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
class Language(Base):
__tablename__ = 'languages'
__table_args__ = (
Index('unique_name', 'name', unique=True),
)
id = Column(UUID, primary_key=True, default=uuid4)
name = Column(Text, nullable=False)
@property
def speakers_count(self):
session = object_session(self)
return session.query(
spoken_association_table).filter_by(lang_id=self.id).count()
@property
def writers_count(self):
session = object_session(self)
return session.query(
written_association_table).filter_by(lang_id=self.id).count()
@property
def native_speakers_count(self):
"""Having it as mother tongue..."""
session = object_session(self)
return session.query(
mother_tongue_association_table).filter_by(lang_id=self.id).count()
@property
def deletable(self):
return (
self.speakers_count
+ self.writers_count
+ self.native_speakers_count
) == 0
| 25.469136 | 79 | 0.650994 | 1,028 | 0.498303 | 0 | 0 | 773 | 0.374697 | 0 | 0 | 305 | 0.147843 |
b97a0b2a9f0b601569ce8973596517ed7d8790ec | 3,588 | py | Python | tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py | djemeljanovs/tfjs | ee4430cd7a04283ec09184a3fe9d3fb27496f1dc | [
"Apache-2.0"
] | null | null | null | tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py | djemeljanovs/tfjs | ee4430cd7a04283ec09184a3fe9d3fb27496f1dc | [
"Apache-2.0"
] | null | null | null | tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py | djemeljanovs/tfjs | ee4430cd7a04283ec09184a3fe9d3fb27496f1dc | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
# Custom op name for fused depthwise conv2d
FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'
# The grappler op name for fused MatMul which starts with '_'
FUSED_MATMUL = '_FusedMatMul'
def node_from_map(node_map, name):
"""Pulls a node def from a dictionary for a given name.
Args:
node_map: Dictionary containing an entry indexed by name for every node.
name: Identifies the node we want to find.
Returns:
NodeDef of the node with the given name.
Raises:
ValueError: If the node isn't present in the dictionary.
"""
stripped_name = node_name_from_input(name)
if stripped_name not in node_map:
raise ValueError("No node named '%s' found in map." % name)
return node_map[stripped_name]
def values_from_const(node_def):
"""Extracts the values from a const NodeDef as a numpy ndarray.
Args:
node_def: Const NodeDef that has the values we want to access.
Returns:
Numpy ndarray containing the values.
Raises:
ValueError: If the node isn't a Const.
"""
if node_def.op != "Const":
raise ValueError(
"Node named '%s' should be a Const op for values_from_const." %
node_def.name)
input_tensor = node_def.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
return tensor_value
# Whether to scale by gamma after normalization.
def scale_after_normalization(node):
if node.op == "BatchNormWithGlobalNormalization":
return node.attr["scale_after_normalization"].b
return True
def node_name_from_input(node_name):
"""Strips off ports and other decorations to get the underlying node name."""
if node_name.startswith("^"):
node_name = node_name[1:]
m = re.search(r"(.*):\d+$", node_name)
if m:
node_name = m.group(1)
return node_name
def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):
"""Clean up the graph def by removing the skipped nodes and clean up the nodes
with inputs that have been removed.
Args:
input_graph_def: GraphDef object to be cleaned.
node_to_skip: Dict with node names to be skipped.
inputs_to_remove: List of nodes to be removed from inputs of all nodes.
Returns:
GraphDef that has been cleaned.
"""
result_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node.name in nodes_to_skip:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
for value in inputs_to_remove:
for i, input_node in enumerate(new_node.input):
if input_node == value.name:
new_node.input[i] = value.input[0]
result_graph_def.node.extend([new_node])
result_graph_def.library.CopyFrom(input_graph_def.library)
result_graph_def.versions.CopyFrom(input_graph_def.versions)
return result_graph_def
| 33.849057 | 80 | 0.726031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,035 | 0.567168 |
b97af59ee4283114481f3e83dc8e3cf6244bb61c | 1,014 | py | Python | loss_fn/classification_loss_fns/binary_cross_entropy.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | [
"AML"
] | 209 | 2021-10-30T08:32:10.000Z | 2022-03-31T16:18:03.000Z | loss_fn/classification_loss_fns/binary_cross_entropy.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | [
"AML"
] | 12 | 2021-12-04T10:47:11.000Z | 2022-03-31T15:39:40.000Z | loss_fn/classification_loss_fns/binary_cross_entropy.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | [
"AML"
] | 50 | 2021-11-01T08:15:02.000Z | 2022-03-29T08:17:34.000Z | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch.nn import functional as F
from torch import Tensor
import argparse
from . import register_classification_loss_fn
from .. import BaseCriteria
@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
"""Binary CE for classification tasks"""
def __init__(self, opts, *args, **kwargs) -> None:
super().__init__()
def forward(
self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
) -> Tensor:
if target.dim() != prediction.dim():
target = F.one_hot(target, num_classes=prediction.shape[-1])
return F.binary_cross_entropy_with_logits(
input=prediction,
target=target.to(prediction.dtype),
weight=None,
reduction="sum",
)
def __repr__(self) -> str:
return "{}()".format(self.__class__.__name__)
| 28.166667 | 87 | 0.667653 | 691 | 0.68146 | 0 | 0 | 753 | 0.742604 | 0 | 0 | 173 | 0.170611 |
b97c7f15dd61f4851cffcb3982337f852b3b8da5 | 576 | py | Python | Sorting/insertion_sort.py | lakshyarawal/pythonPractice | 4b400342198a8270c5ac0c6306afb555f927c6c1 | [
"MIT"
] | null | null | null | Sorting/insertion_sort.py | lakshyarawal/pythonPractice | 4b400342198a8270c5ac0c6306afb555f927c6c1 | [
"MIT"
] | null | null | null | Sorting/insertion_sort.py | lakshyarawal/pythonPractice | 4b400342198a8270c5ac0c6306afb555f927c6c1 | [
"MIT"
] | null | null | null | """ Insertion Sort Algorithm:"""
"""Implementation"""
def insertion_sort(arr) -> list:
n = len(arr)
for i in range(1, n):
swap_index = i
for j in range(i-1, -1, -1):
if arr[swap_index] < arr[j]:
arr[swap_index], arr[j] = arr[j], arr[swap_index]
swap_index -= 1
else:
break
return arr
def main():
arr_input = [10, 5, 30, 1, 2, 5, 10, 10]
a2 = insertion_sort(arr_input)
print(a2)
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 19.2 | 65 | 0.522569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.173611 |
b97c828450c34038ee92e089e3f2b951d2113017 | 903 | py | Python | nipype/interfaces/spm/__init__.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/interfaces/spm/__init__.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/interfaces/spm/__init__.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""
from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
Coregister, Normalize, Normalize12, Segment,
Smooth, NewSegment, DARTEL, DARTELNorm2MNI,
CreateWarped, VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
OneSampleTTestDesign, TwoSampleTTestDesign,
PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
ApplyInverseDeformation, ResliceToReference, DicomImport)
| 53.117647 | 77 | 0.653378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.187154 |
b97cd7905f5c596cb6d79b67c2c80e83907421d9 | 8,257 | py | Python | network.py | tobloef/neural-network | bd05a8b9eccc0f5a973782247d39f9b5aa33156c | [
"MIT"
] | 3 | 2018-01-06T22:27:58.000Z | 2018-08-12T20:29:51.000Z | network.py | tobloef/neural-network | bd05a8b9eccc0f5a973782247d39f9b5aa33156c | [
"MIT"
] | 1 | 2018-03-31T18:49:56.000Z | 2018-04-19T04:52:33.000Z | network.py | tobloef/neural-network | bd05a8b9eccc0f5a973782247d39f9b5aa33156c | [
"MIT"
] | null | null | null | import numpy as np
from mathUtils import *
class Network(object):
"""
Model for a feedforward Neural Network that use backpropagation with stochastic gradient decent.
"""
def __init__(self, layerSizes, biasVectors, weightMatrices):
"""
Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer.
"""
self.layerSizes = layerSizes
self.biasVectors = biasVectors
self.weightMatrices = weightMatrices
@staticmethod
def generateRandomNetwork(layerSizes):
"""
        Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are drawn from a Gaussian distribution, so the values are most likely to be close to 0.
"""
biasVectors = []
"""Generate biases for each neuron in each layer, except the input layer."""
for size in layerSizes[1:]:
"""
            np.random.randn generates arrays of arrays of random numbers, based on the parameters.
np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers.
"""
biasVectors.append(np.random.randn(size, 1))
"""Generate weights for connections between layers."""
weightMatrices = []
for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]):
weightMatrices.append(np.random.randn(prevSize, size))
return Network(layerSizes, biasVectors, weightMatrices)
def getOutputs(self, inputs):
"""Return a vector of the network's outputs based on the given inputs, using feedforward."""
activations = inputs
for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
"""
            For every layer, get the bias vector and the weight matrix. Take the dot product of the weight matrix and the previous layer's activations and add the bias vector to get the z-vector; applying the sigmoid to it gives the activation vector for the current layer.
"""
zVector = np.dot(weightMatrix, activations) + biasVector
activations = sigmoid(zVector)
return activations
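    # Usage sketch (hypothetical values): inputs must be a column vector matching the
    # input layer size, e.g. for a [2, 3, 1] network:
    #   net = Network.generateRandomNetwork([2, 3, 1])
    #   out = net.getOutputs(np.array([[0.5], [0.8]]))  # returns an array of shape (1, 1)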
def train(self, data, epochs, batchSize, rate, testData=None):
"""
        Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training data are used to reduce the training time. The training data is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch.
"""
print("Training network with shape {}, batch size {} and learning rate {} for {} epochs...".format(self.layerSizes, batchSize, rate, epochs))
for e in range(epochs):
np.random.shuffle(data)
batches = []
for i in range(0, len(data), batchSize):
batches.append(data[i:i+batchSize])
for batch in batches:
self._tuneNetwork(batch, rate)
if (testData):
result = self._evaluate(testData)
print("Epoch #{} completed with {:.2f}% correctness.".format(e+1, 100/len(testData)*result))
else:
print("Epoch #{} completed.".format(e))
def _tuneNetwork(self, batch, rate):
"""
Tune the weights and biases of the network by using backpropagation with gradient descend.
"""
"""
        Set up matrices and vectors shaped like the weight matrices and bias vectors, filled with zeroes. These are used to accumulate the changes to make, summed over every set of training data in the batch.
"""
sumBiasVectors = []
for biasVector in self.biasVectors:
sumBiasVectors.append(np.zeros(biasVector.shape))
sumWeightMatrices = []
for weightMatrix in self.weightMatrices:
sumWeightMatrices.append(np.zeros(weightMatrix.shape))
for inputs, expected in batch:
"""
            Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to the matrices/vectors totalling the changes needed across all the training data.
"""
deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected)
newSumBiasVectors = []
for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors):
newSumBiasVectors.append(totalBiasVector + deltaBiasVector)
sumBiasVectors = newSumBiasVectors
newSumWeightMatrices = []
for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices):
newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix)
sumWeightMatrices = newSumWeightMatrices
"""
        Take the accumulated changes from all the training data, average them, scale by the learning rate and subtract them from the current weights and biases. Then use these as the new weights and biases.
"""
newBiasVectors = []
for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors):
newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector)
newWeightMatrices = []
for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices):
newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix)
self.biasVectors = newBiasVectors
self.weightMatrices = newWeightMatrices
def _backpropagate(self, inputs, expected):
"""
Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data.
"""
deltaBiasVectors = []
for biasVector in self.biasVectors:
deltaBiasVectors.append(np.zeros(biasVector.shape))
deltaWeightMatrices = []
for weightMatrix in self.weightMatrices:
deltaWeightMatrices.append(np.zeros(weightMatrix.shape))
"""Store all activations for the entire network, starting with the input layer."""
activationVector = inputs
activationVectors = [inputs]
"""Find the z-vector for layer in the network"""
zVectors = []
for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
zVector = np.dot(weightMatrix, activationVector) + biasVector
zVectors.append(zVector)
activationVector = sigmoid(zVector)
activationVectors.append(activationVector)
"""
* Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias.
* Then move onto each hidden layer and the input layer.
"""
deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1])
deltaBiasVectors[-1] = deltaBiasVector
deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose())
for l in range(-2, -len(self.layerSizes), -1):
# Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead
weightMatrix = self.weightMatrices[l+1].transpose()
sigmoidDeriv = sigmoidDerivative(zVectors[l])
deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv
deltaBiasVectors[l] = deltaBiasVector
deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose())
return (deltaBiasVectors, deltaWeightMatrices)
def _evaluate(self, testData):
"""Test the network with the specified test data and return the number of correct guesses."""
correctGuesses = 0
for inputs, expected in testData:
"""Increment correct guesses if the most active output is the expected one."""
outputs = self.getOutputs(inputs)
guess = np.argmax(outputs)
if (guess == expected):
correctGuesses += 1
return correctGuesses | 53.270968 | 286 | 0.657987 | 8,213 | 0.994671 | 0 | 0 | 1,053 | 0.127528 | 0 | 0 | 3,275 | 0.396633 |
b97d4675d330154e0b12b91fbd601affd888ea29 | 1,901 | py | Python | examples/airflow/dags/etl_orders_7_days.py | phixMe/marquez | 06d71635369893b371a8a9c9e7023f11d7cbb1f8 | [
"Apache-2.0"
] | null | null | null | examples/airflow/dags/etl_orders_7_days.py | phixMe/marquez | 06d71635369893b371a8a9c9e7023f11d7cbb1f8 | [
"Apache-2.0"
] | null | null | null | examples/airflow/dags/etl_orders_7_days.py | phixMe/marquez | 06d71635369893b371a8a9c9e7023f11d7cbb1f8 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from marquez_airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.utils.dates import days_ago
default_args = {
'owner': 'datascience',
'depends_on_past': False,
'start_date': days_ago(1),
'email_on_failure': False,
'email_on_retry': False,
'email': ['[email protected]']
}
dag = DAG(
'etl_orders_7_days',
schedule_interval='@hourly',
catchup=False,
default_args=default_args,
description='Loads newly placed orders weekly.'
)
t1 = PostgresOperator(
task_id='if_not_exists',
postgres_conn_id='food_delivery_db',
sql='''
CREATE TABLE IF NOT EXISTS orders_7_days (
order_id INTEGER REFERENCES orders(id),
placed_on TIMESTAMP NOT NULL,
discount_id INTEGER REFERENCES discounts(id),
menu_id INTEGER REFERENCES menus(id),
restaurant_id INTEGER REFERENCES restaurants(id),
menu_item_id INTEGER REFERENCES menu_items(id),
category_id INTEGER REFERENCES categories(id)
);''',
dag=dag
)
t2 = PostgresOperator(
    task_id='truncate',
postgres_conn_id='food_delivery_db',
sql='TRUNCATE TABLE orders_7_days;',
dag=dag
)
t3 = PostgresOperator(
task_id='insert',
postgres_conn_id='food_delivery_db',
sql='''
INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)
SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id
FROM orders AS o
INNER JOIN menu_items AS mi
ON mi.id = o.menu_item_id
INNER JOIN categories AS c
ON c.id = mi.category_id
INNER JOIN menus AS m
ON m.id = c.menu_id
WHERE o.placed_on >= NOW() - interval '7 days'
''',
dag=dag
)
t1 >> t2 >> t3
| 29.246154 | 135 | 0.681746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,256 | 0.660705 |
b97deb7d2bd255cd9a3d9f169d969333b63452ec | 313 | py | Python | sample/pizza.py | marianarmorgado/python-starter | 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | [
"MIT"
] | null | null | null | sample/pizza.py | marianarmorgado/python-starter | 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | [
"MIT"
] | null | null | null | sample/pizza.py | marianarmorgado/python-starter | 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | [
"MIT"
] | null | null | null | # store information about a pizza being ordered
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra vegan cheese']
}
# summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza" +
"with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping) | 26.083333 | 59 | 0.645367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.651757 |
b97e1419e0e45b84ecc462227c812c10beb92718 | 181 | py | Python | YouTube/CursoEmVideo/python/ex012.py | Fh-Shadow/Progamando | f496d83c36e9a079ed06b4e7c34396c57f539de9 | [
"MIT"
] | null | null | null | YouTube/CursoEmVideo/python/ex012.py | Fh-Shadow/Progamando | f496d83c36e9a079ed06b4e7c34396c57f539de9 | [
"MIT"
] | null | null | null | YouTube/CursoEmVideo/python/ex012.py | Fh-Shadow/Progamando | f496d83c36e9a079ed06b4e7c34396c57f539de9 | [
"MIT"
] | null | null | null | a = float(input('Qual é o preço do produto? R$'))
d = a - (a * 23 / 100)
print('O produto que custava R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}' .format(a, d))
| 45.25 | 106 | 0.607735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.648649 |
b97e5feb1052b87d359d8e3d9f63ba930bff8e66 | 15,038 | py | Python | dnnlib/submission/submit.py | gperdrizet/gansformer | c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5 | [
"MIT"
] | 1,172 | 2021-03-02T02:00:44.000Z | 2022-03-31T02:46:45.000Z | dnnlib/submission/submit.py | gperdrizet/gansformer | c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5 | [
"MIT"
] | 37 | 2021-03-03T14:11:11.000Z | 2022-03-12T15:40:15.000Z | dnnlib/submission/submit.py | gperdrizet/gansformer | c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5 | [
"MIT"
] | 138 | 2021-03-02T06:37:10.000Z | 2022-03-30T14:59:09.000Z | # Submit a function to be run either locally or in a computing cluster.
# Compared to original StyleGAN implementation, we extend the support for automatic training resumption,
# and network recompilation.
import copy
import inspect
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import sys
import time
import traceback
from enum import Enum
from .. import util
from ..util import EasyDict
from . import internal
class SubmitTarget(Enum):
# The target where the function should be run
# LOCAL: Run it locally
LOCAL = 1
class PathType(Enum):
# Determines in which format should a path be formatted
# WINDOWS: Format with Windows style
# LINUX: Format with Linux/Posix style
# AUTO: Use current OS type to select either WINDOWS or LINUX
WINDOWS = 1
LINUX = 2
AUTO = 3
class PlatformExtras:
# A mixed bag of values used by dnnlib heuristics
# Attributes:
# data_reader_buffer_size: Used by DataReader to size internal shared memory buffers
# data_reader_process_count: Number of worker processes to spawn (zero for single
# thread operation)
def __init__(self):
self.data_reader_buffer_size = 1<<30 # 1 GB
self.data_reader_process_count = 0 # single threaded default
_user_name_override = None
class SubmitConfig(util.EasyDict):
# Strongly typed config dict needed to submit runs
# Attributes:
# run_dir_root: Path to the run dir root. Can be optionally templated with tags
# Needs to always be run through get_path_from_template
# run_desc: Description of the run. Will be used in the run dir and task name
# run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir
# run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will
# be the src directory inside the run dir
# submit_target: Submit target enum value. Used to select where the run is actually launched
# num_gpus: Number of GPUs used/requested for the run
# print_info: Whether to print debug information when submitting
# local.do_not_copy_source_files: Do not copy source files from the working directory to the
# run dir.
# run_id: Automatically populated value during submit
# run_name: Automatically populated value during submit
# run_dir: Automatically populated value during submit
# run_func_name: Automatically populated value during submit
# run_func_kwargs: Automatically populated value during submit
# user_name: Automatically populated value during submit. Can be set by the user which will then
# override the automatic value
# task_name: Automatically populated value during submit
# host_name: Automatically populated value during submit
# platform_extras: Automatically populated values during submit. Used by various dnnlib libraries
# such as the DataReader class
def __init__(self):
super().__init__()
# run (set these)
self.run_dir_root = "" # should always be passed through get_path_from_template
self.run_desc = ""
self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs",
".vscode", "_cudacache"]
self.run_dir_extra_files = []
# submit (set these)
self.submit_target = SubmitTarget.LOCAL
self.num_gpus = 1
self.print_info = False
self.nvprof = False
self.local = internal.local.TargetOptions()
self.datasets = []
# (automatically populated)
self.run_id = None
self.run_name = None
self.run_dir = None
self.run_func_name = None
self.run_func_kwargs = None
self.user_name = None
self.task_name = None
self.host_name = "localhost"
self.platform_extras = PlatformExtras()
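# Usage sketch (illustrative values; the run function path and kwargs are placeholders):
#   cfg = SubmitConfig()
#   cfg.run_dir_root = "results"
#   cfg.run_desc = "example-run"
#   submit_run(cfg, "training.training_loop.training_loop", resume=False, my_kwarg=123)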
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
# Replace tags in the given path template and return either Windows or Linux formatted path
# automatically select path type depending on running OS
if path_type == PathType.AUTO:
if platform.system() == "Windows":
path_type = PathType.WINDOWS
elif platform.system() == "Linux":
path_type = PathType.LINUX
else:
raise RuntimeError("Unknown platform")
path_template = path_template.replace("<USERNAME>", get_user_name())
# return correctly formatted path
if path_type == PathType.WINDOWS:
return str(pathlib.PureWindowsPath(path_template))
elif path_type == PathType.LINUX:
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError("Unknown platform")
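# e.g. get_path_from_template("results/<USERNAME>") -> "results/alice" on Linux or
# "results\alice" on Windows, where "alice" stands in for whatever get_user_name() returns.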
def get_template_from_path(path: str) -> str:
# Convert a normal path back to its template representation
path = path.replace("\\", "/")
return path
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
    # Convert a normal path to a template and then convert it back to a normal path with the given path type
path_template = get_template_from_path(path)
path = get_path_from_template(path_template, path_type)
return path
def set_user_name_override(name: str) -> None:
# Set the global username override value
global _user_name_override
_user_name_override = name
def get_user_name():
# Get the current user name
if _user_name_override is not None:
return _user_name_override
elif platform.system() == "Windows":
return os.getlogin()
elif platform.system() == "Linux":
try:
import pwd
return pwd.getpwuid(os.geteuid()).pw_name
except:
return "unknown"
else:
raise RuntimeError("Unknown platform")
def make_run_dir_path(*paths):
# Make a path/filename that resides under the current submit run_dir
# Args:
# *paths: Path components to be passed to os.path.join
# Returns:
# A file/dirname rooted at submit_config.run_dir. If there's no
# submit_config or run_dir, the base directory is the current
# working directory.
# E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))`
import dnnlib
if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):
return os.path.join(os.getcwd(), *paths)
return os.path.join(dnnlib.submit_config.run_dir, *paths)
def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str:
    # Create the run dir for this run name (reusing it when resuming)
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
os.makedirs(run_dir_root)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if not resume:
if os.path.exists(run_dir) and create_new:
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
if not os.path.exists(run_dir):
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
# Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id
# Assumes IDs are numbers at the start of the directory names
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:
# Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable
pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False)
if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:
return
files = []
run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
assert "." in submit_config.run_func_name
for _idx in range(submit_config.run_func_name.count(".") - 1):
run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False)
dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True)
files += submit_config.run_dir_extra_files
files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))]
util.copy_files_and_create_dirs(files)
def run_wrapper(submit_config: SubmitConfig) -> None:
# Wrap the actual run function call for handling logging, exceptions, typing, etc
is_local = submit_config.submit_target == SubmitTarget.LOCAL
# when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
if is_local:
logger = util.Logger(file_name = os.path.join(submit_config.run_dir, "log.txt"), file_mode="a", should_flush = True)
else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
logger = util.Logger(file_name = None, should_flush = True)
import dnnlib
dnnlib.submit_config = submit_config
exit_with_errcode = False
try:
print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
run_func_obj = util.get_obj_by_name(submit_config.run_func_name)
assert callable(run_func_obj)
sig = inspect.signature(run_func_obj)
if "submit_config" in sig.parameters:
run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs)
else:
run_func_obj(**submit_config.run_func_kwargs)
print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, "log.txt")
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
# Defer sys.exit(1) to happen after we close the logs and create a _finished.txt
exit_with_errcode = True
finally:
open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
dnnlib.RunContext.get().close()
dnnlib.submit_config = None
logger.close()
# If we hit an error, get out of the script now and signal the error
# to whatever process that started this script.
if exit_with_errcode:
sys.exit(1)
return submit_config
def open_file_or_url(file_or_url):
if util.is_url(file_or_url):
return util.open_url(file_or_url, cache_dir = ".stylegan2-cache")
return open(file_or_url, "rb")
def load_pkl(file_or_url):
with open_file_or_url(file_or_url) as file:
return pickle.load(file, encoding = "latin1")
def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False,
resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None:
# Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.
# create_newdir: enforces the creation of a new run directory
# resume: resumes a prior experiment using its existing run directory
# load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters
submit_config = copy.deepcopy(submit_config)
submit_target = submit_config.submit_target
farm = None
if submit_target == SubmitTarget.LOCAL:
farm = internal.local.Target()
assert farm is not None # unknown target
# Disallow submitting jobs with zero num_gpus
if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):
raise RuntimeError("submit_config.num_gpus must be set to a non-zero value")
if submit_config.user_name is None:
submit_config.user_name = get_user_name()
submit_config.run_func_name = run_func_name
submit_config.run_func_kwargs = run_func_kwargs
#--------------------------------------------------------------------
# Prepare submission by populating the run dir
#--------------------------------------------------------------------
host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir)
submit_config.task_name = "{}-{:05d}-{}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
if not re.match(docker_valid_name_regex, submit_config.task_name):
raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name)
# Farm specific preparations for a submit
farm.finalize_submit_config(submit_config, host_run_dir)
# In case of resumption, load_config = True to load the prior submit_config file from the directory
    # (so as to keep the original configuration of the experiment rather than the newly provided
# command-line arguments.
if load_config:
config_file = os.path.join(host_run_dir, "submit_config.pkl")
if os.path.exists(config_file):
old_submit_config = submit_config
submit_config = load_pkl(config_file)
submit_config["run_id"] = old_submit_config["run_id"]
submit_config["run_name"] = old_submit_config["run_name"]
if "resume_pkl" in old_submit_config["run_func_kwargs"]:
submit_config["run_func_kwargs"]["resume_pkl"] = old_submit_config["run_func_kwargs"]["resume_pkl"]
submit_config["run_func_kwargs"]["resume_kimg"] = old_submit_config["run_func_kwargs"]["resume_kimg"]
_populate_run_dir(submit_config, host_run_dir)
return farm.submit(submit_config, host_run_dir)
| 43.337176 | 238 | 0.691847 | 3,564 | 0.237 | 0 | 0 | 0 | 0 | 0 | 0 | 5,938 | 0.394866 |
b97f4f2077af2e6d4198d160e8fea133c49dee89 | 4,187 | py | Python | pyecharts/custom/grid.py | zilong305/pycharts | 6cf1bb7f17001a36da6a766615a78b1dbef5918f | [
"MIT"
] | null | null | null | pyecharts/custom/grid.py | zilong305/pycharts | 6cf1bb7f17001a36da6a766615a78b1dbef5918f | [
"MIT"
] | null | null | null | pyecharts/custom/grid.py | zilong305/pycharts | 6cf1bb7f17001a36da6a766615a78b1dbef5918f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from pyecharts.option import grid
class Grid(object):
def __init__(self):
self._chart = None
self._js_dependencies = set()
def add(self, chart,
grid_width=None,
grid_height=None,
grid_top=None,
grid_bottom=None,
grid_left=None,
grid_right=None):
"""
:param chart:
chart instance
:param grid_width:
Width of grid component. Adaptive by default.
:param grid_height:
Height of grid component. Adaptive by default.
:param grid_top:
Distance between grid component and the top side of the container.
:param grid_bottom:
Distance between grid component and the bottom side of the container.
:param grid_left:
Distance between grid component and the left side of the container.
:param grid_right:
Distance between grid component and the right side of the container.
:return:
"""
if self._chart is None:
self._chart = chart
self._chart._option.update(grid=[])
self._js_dependencies = chart._js_dependencies
_grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)
if _grid:
for _ in range(len(self._chart._option.get('series'))):
self._chart._option.get('grid').append(_grid)
else:
_series = (
chart._option.get('series'),
chart._option.get('xAxis', None),
chart._option.get('yAxis', None),
chart._option.get('legend')[0],
chart._option.get('title')[0]
)
_index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series)
self._chart._option.get('legend').append(_legned)
self._chart._option.get('title').append(_title)
if _xaxis and _yaxis is not None:
try:
_xaxis[0].update(gridIndex=_index-1)
_yaxis[0].update(gridIndex=_index-1)
self._chart._option.get('xAxis').append(_xaxis[0])
self._chart._option.get('yAxis').append(_yaxis[0])
except:
pass
            # indexflag uniquely identifies each series
_flag = self._chart._option.get('series')[0].get('indexflag')
_series_index = 0
for s in self._chart._option.get('series'):
if _flag == s.get('indexflag'):
s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)
else:
_series_index += 1
s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)
_flag = s.get('indexflag')
_grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)
for _ in range(_index_once):
self._chart._option.get('grid').append(_grid)
        self._js_dependencies = self._js_dependencies.union(chart._js_dependencies)
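    # Usage sketch (assumes two already-built pyecharts charts, e.g. a Bar and a Line):
    #   grid = Grid()
    #   grid.add(bar, grid_top="60%")
    #   grid.add(line, grid_bottom="60%")
    #   grid.render("grid.html")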
def __custom(self, series):
"""
:param series:
series data
:return:
"""
_series, _xaxis, _yaxis, _legend, _title = series
for s in _series:
self._chart._option.get('series').append(s)
return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title
def render(self, path="render.html"):
"""
:param path:
:return:
"""
self._chart.render(path)
def render_embed(self):
"""
:return:
"""
return self._chart.render_embed()
def show_config(self):
"""
:return:
"""
import pprint
return pprint.pprint(self._chart._option)
@property
def chart(self):
"""
:return:
"""
return self._chart
def _repr_html_(self):
"""
:return:
"""
return self._chart._repr_html_()
| 31.961832 | 100 | 0.540482 | 4,112 | 0.982087 | 0 | 0 | 99 | 0.023645 | 0 | 0 | 1,192 | 0.284691 |
b97f78c59a8296809ae879f2d6f8355b0f8c52d0 | 4,588 | py | Python | smooch/conversations.py | devinmcgloin/smooch | c9561c3e7f1546efc58daa472b70f738d0d35e13 | [
"MIT"
] | 3 | 2016-07-04T12:02:03.000Z | 2017-03-20T19:39:36.000Z | smooch/conversations.py | devinmcgloin/smooch | c9561c3e7f1546efc58daa472b70f738d0d35e13 | [
"MIT"
] | 41 | 2019-05-28T09:54:04.000Z | 2020-02-20T05:34:19.000Z | smooch/conversations.py | devinmcgloin/smooch | c9561c3e7f1546efc58daa472b70f738d0d35e13 | [
"MIT"
] | 2 | 2016-07-20T14:31:45.000Z | 2016-11-18T12:19:38.000Z | import logging
from .endpoint import ask
def send_message(user_id, message, sent_by_maker=True):
if not valid_args(user_id, message):
logging.warning("send message called with invalid args user_id={} message={}".format(user_id, message))
return
logging.debug("Sending message: user_id={0} message={1} sent_by_maker={2}".format(user_id, message, sent_by_maker))
role = "appMaker"
if not sent_by_maker:
role = "appUser"
data = {"text": message, "role": role}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def get_conversation(user_id):
if not user_id:
logging.warning("get conversation called with invalid arg user_id={}".format(user_id))
return
logging.debug("Get conversation: user_id={}".format(user_id))
return ask('appusers/{0}/conversation'.format(user_id), {}, 'get')
def request_payment(user_id, message, options):
"""Note that amount is a integer which specifies the amount of cents in the transaction
Smooch will default to the currency specified in your account settings."""
if not valid_args(user_id, message, options):
logging.warning("request payment called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for short_text, result in options:
buttons.append({
"type": "buy",
"text": short_text,
"amount": result})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
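# e.g. request_payment("some-user-id", "Your order total:", [("Pay $4.99", 499)])
# (ids and values are placeholders; the amount 499 means $4.99 expressed in cents).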
def send_links(user_id, message, options):
"""Sends a series of links. The options field is a dictionary in which the keys are
descriptions and values uris"""
if not valid_args(user_id, message, options):
logging.warning("send links called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for short_text, result in options:
buttons.append({
"type": "link",
"text": short_text,
"uri": result})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def send_postbacks(user_id, message, options):
"""Sends a series of options that you can listen for on your webhook. The options field is a dictionary in which the keys are
descriptions and values the postback payload. You need to set up a webhook to listen for the postback."""
if not valid_args(user_id, message, options):
logging.warning("send postback called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for short_text, result in options:
buttons.append({
"type": "postback",
"text": short_text,
"payload": result
})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def send_buttons(user_id, message, options):
"""Options is a list of tuples in which the first element is the type of the button,
second the short text, and third the result for the specified type."""
if not valid_args(user_id, message, options):
logging.warning("send buttons called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for text, kind, result in options:
buttons.append({
"type": kind,
"text": text,
"payload": result
})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
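# e.g. send_buttons("some-user-id", "Pick one:",
#                   [("Red", "postback", "CHOSE_RED"),
#                    ("Docs", "link", "https://example.com")])
# (ids and payloads are placeholders; each tuple is (text, type, result)).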
def valid_args(user_id, message, options=None):
if options is not None:
if user_id and message and options and type(options) is list:
return True
return False
else:
if user_id and message:
return True
return False
| 30.586667 | 129 | 0.598954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,670 | 0.363993 |
b980ab008a2dab6e2778edec1d7d9e24b2315a73 | 1,086 | py | Python | cifar/evalit.py | Sharkbyteprojects/IRIS-ML_and_Deep-Learning | f0e053cf7a0e69019bbba36e6da3e60d76105fe9 | [
"MIT"
] | null | null | null | cifar/evalit.py | Sharkbyteprojects/IRIS-ML_and_Deep-Learning | f0e053cf7a0e69019bbba36e6da3e60d76105fe9 | [
"MIT"
] | null | null | null | cifar/evalit.py | Sharkbyteprojects/IRIS-ML_and_Deep-Learning | f0e053cf7a0e69019bbba36e6da3e60d76105fe9 | [
"MIT"
] | null | null | null | import keras
from keras.models import load_model
from PIL import Image
import matplotlib.pylab as plt
import numpy as np
import zipfile
print("Extract")
zip_ref = zipfile.ZipFile("./asset.zip", 'r')
zip_ref.extractall(".")
zip_ref.close()
print("Load Model")
model=load_model("cifar-model.h5")
CIFAR_10_CLASSES=["Plane","Car","bird","cat","deer","dog","frog","horse","ship","truck"]
def calc(imname):
test_image =Image.open("asset/"+imname)
test_image=test_image.resize((32,32),Image.ANTIALIAS)
test_image=np.array(test_image,dtype="float32")
test_image/=255
test_image=test_image.reshape(-1,32,32,3)
predictions=model.predict(test_image)
index_max_pred=np.argmax(predictions)
plt.title("Complete: {}".format(CIFAR_10_CLASSES[index_max_pred]))
plt.imshow(test_image[0].reshape(32,32,3))
print(predictions)
plt.show()
print("START TEST")
calc("lkw-image.jpg")
calc("cat.jpg")
calc("frog.jpg")
calc("fog.jpg")
calc("lfog.jpg")
calc("d.jpg")
calc("b.jpg")
calc("bs.jpg")
calc("plapper.jpg")
calc("ds.jpg")
print("Complete")
print("End")
quit(0)
| 27.15 | 88 | 0.710866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.248619 |
b980be1e0d2b8db749e25a4f49c35cdddbdca9d9 | 1,650 | py | Python | tt/urls.py | samiksha-patil/Knowledge-Sharing-Platform | 22e61a659d5ad63fe656fa639dc897cbdebad4fe | [
"bzip2-1.0.6"
] | 1 | 2021-05-09T08:18:49.000Z | 2021-05-09T08:18:49.000Z | tt/urls.py | samiksha-patil/Knowledge-Sharing-Platform | 22e61a659d5ad63fe656fa639dc897cbdebad4fe | [
"bzip2-1.0.6"
] | 9 | 2021-03-19T01:11:35.000Z | 2022-03-12T00:20:13.000Z | tt/urls.py | samiksha-patil/Knowledge-Sharing-Platform | 22e61a659d5ad63fe656fa639dc897cbdebad4fe | [
"bzip2-1.0.6"
] | null | null | null | """
tt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# Uncomment next two lines to enable admin:
from django.contrib import admin
from django.urls import path, include
from users import views as user_views
from django.contrib.auth import views as auth_views
from upload import views as upload_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# Uncomment the next line to enable the admin:
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('register/', user_views.register, name='register'),
path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),
path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'),
path('profile/', user_views.profile, name='profile'),
path('book/',upload_views.book_list,name='book_list'),
path('book/upload',upload_views.upload_book,name='upload_book'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.869565 | 100 | 0.726061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.539394 |
b9814171798d1f2ddf5247c67182a7e7e032132e | 105 | py | Python | src/git/cmd.py | danihodovic/dht | 636f54d70f8c6ca60ab48f2815b3e9e1a336d78f | [
"MIT"
] | 2 | 2021-01-21T15:04:32.000Z | 2021-01-21T16:23:32.000Z | src/git/cmd.py | danihodovic/dht | 636f54d70f8c6ca60ab48f2815b3e9e1a336d78f | [
"MIT"
] | 2 | 2020-12-30T20:34:51.000Z | 2021-01-17T20:02:02.000Z | src/git/cmd.py | danihodovic/dht | 636f54d70f8c6ca60ab48f2815b3e9e1a336d78f | [
"MIT"
] | null | null | null | import os
import click
os.environ["GIT_PYTHON_REFRESH"] = "quiet"
@click.group()
def git():
pass
| 9.545455 | 42 | 0.67619 | 0 | 0 | 0 | 0 | 34 | 0.32381 | 0 | 0 | 27 | 0.257143 |
b98238142a5e4442e3c9fdd220f6bde9274299de | 570 | py | Python | TwitterImage2JPG.py | Tymec/Playground | 5a4aaa4a88e084d8d31803485b1ec521ad49a3d1 | [
"MIT"
] | null | null | null | TwitterImage2JPG.py | Tymec/Playground | 5a4aaa4a88e084d8d31803485b1ec521ad49a3d1 | [
"MIT"
] | null | null | null | TwitterImage2JPG.py | Tymec/Playground | 5a4aaa4a88e084d8d31803485b1ec521ad49a3d1 | [
"MIT"
] | 1 | 2019-02-19T10:32:07.000Z | 2019-02-19T10:32:07.000Z | import glob
import os
def main():
os.chdir("F:/Downloads")
extensions = ["*.jpg_large", "*.png_large", "*.jpg_orig"]
file_list = list()
for extension in extensions:
file_list = file_list + glob.glob(extension)
for file in file_list:
for extension in extensions:
new_extension = extension.replace('*', '')
if file.endswith(new_extension):
new_name = file.replace(new_extension, '') + ".jpg"
os.rename(file, new_name)
print("Done!")
if __name__ == "__main__":
main()
| 22.8 | 67 | 0.585965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.126316 |
b982943f0b8c226209550f8c7f62a0e03d0b5ff5 | 6,405 | py | Python | Data Analysis/classification.py | Riccardo95Facchini/DIL-2019 | febeda55fd647943a1b8c49b3c5192fcd69fdaf5 | [
"MIT"
] | null | null | null | Data Analysis/classification.py | Riccardo95Facchini/DIL-2019 | febeda55fd647943a1b8c49b3c5192fcd69fdaf5 | [
"MIT"
] | null | null | null | Data Analysis/classification.py | Riccardo95Facchini/DIL-2019 | febeda55fd647943a1b8c49b3c5192fcd69fdaf5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
#EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB
input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv'
dataset = pd.read_csv(input_file, sep=';', header = 0)
dataset.head()
#DELETE NEXT CALLS DATA
dataset = dataset.drop("contact", axis=1)
dataset = dataset.drop("day", axis=1)
dataset = dataset.drop("month", axis=1)
dataset = dataset.drop("duration", axis=1)
dataset = dataset.drop("campaign", axis=1)
dataset = dataset.drop("pdays", axis=1)
dataset = dataset.drop("previous", axis=1)
dataset = dataset.drop("poutcome", axis=1)
dataset.head()
#FEATURE ENGINEERING
cleanup_nums = {"marital": {"married": 1, "single": 0, "divorced":-1},
"education": {"primary": 1, "secondary": 2, "tertiary": 3},
"default": {"yes": 1, "no": 0},
"housing": {"yes": 1, "no": 0},
"loan": {"yes": 1, "no": 0},
"y": {"yes": 1, "no": 0}}
dataset.replace(cleanup_nums, inplace=True)
dataset.head()
dataset.dtypes
dataset = dataset[dataset.job != 'unknown']
dataset = dataset[dataset.education != 'unknown']
dataset['education'] = dataset['education'].astype(int)
#CORRELATION MATRIX
plt.figure(figsize=(12,10))
cor = dataset.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()
#CLASSIFICATION
X = dataset.iloc[:, 0:7]
y = dataset.iloc[:, 7]
X = pd.get_dummies(X, columns=["job"], prefix=["job"])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
#DECISION TREE
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
clf_dt = DecisionTreeClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)
plt.hist(esito)
#RANDOM FOREST
from sklearn.ensemble import RandomForestClassifier
clf_dt = RandomForestClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)
plt.hist(esito)
# K-NEAREST NEIGHBOURS
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# TRAINING - TEST
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# SCALING
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# FITTING
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train)
# PREDICTION
y_pred = classifier.predict(X_test)
# CONFUSION MATRIX
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, y_pred,target_names=target_names))
print(cm)
plt.hist(y_pred)
#UNDERSAMPLING
from sklearn.utils import resample
dataset_sample = pd.get_dummies(dataset, columns=["job"], prefix=["job"])
#SPLIT FEATURE AND TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
#TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
X = pd.concat([X_train, y_train], axis=1)
#SELECTING TARGET CLASSES
not_sub = X[X.y==0]
sub = X[X.y==1]
not_sub_downsampled = resample(not_sub,
replace = False,
n_samples = len(sub),
random_state = 27)
# COMBINE MINORITY AND DOWNSAMPLED MAJORITY
downsampled = pd.concat([not_sub_downsampled, sub])
#DECISION TREE
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)
clf_dt = DecisionTreeClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
#RANDOM FOREST
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)
clf_dt = RandomForestClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
#SMOTE - DECISION TREE
from imblearn.over_sampling import SMOTE
#SPLIT FEATURE TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
#TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
#SMOTE
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)
clf_dt = DecisionTreeClassifier()
#FIT
smote = clf_dt.fit(X_train,y_train)
#PREDICITON
smote_pred = smote.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))
#SMOTE - RANDOM FOREST
from imblearn.over_sampling import SMOTE
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)
clf_dt = RandomForestClassifier()
smote = clf_dt.fit(X_train,y_train)
smote_pred = smote.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))
#RECAP on RECALL
x = np.arange(3)
plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='upper right')
#RECAP on F1
x = np.arange(3)
plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='lower right') | 25.722892 | 98 | 0.721624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,210 | 0.188915 |
b982c2b4e976b723dfa3208c1bc1e4ea51b77ac9 | 5,562 | py | Python | tools/c7n_azure/tests/test_route_table.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | 2 | 2020-01-20T19:46:28.000Z | 2020-08-19T14:20:27.000Z | tools/c7n_azure/tests/test_route_table.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | 79 | 2019-03-20T12:27:06.000Z | 2019-08-14T14:07:04.000Z | tools/c7n_azure/tests/test_route_table.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | 2 | 2019-04-22T15:20:23.000Z | 2019-08-27T12:37:51.000Z | # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure_common import BaseTest, arm_template
class RouteTableTest(BaseTest):
route_table_name = 'cctestroutetable'
vnet_name = 'ccroutetablevnet'
allowed_subnet_name = 'cctestsubnet1'
disallowed_subnet_name = 'cctestsubnet2'
@staticmethod
def _subnet_id_suffix(subnet):
return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)
def test_route_table_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-route-table',
'resource': 'azure.routetable'
}, validate=True)
self.assertTrue(p)
@arm_template('route-table-and-vnet.json')
def test_find_route_table_by_name(self):
p = self.load_policy({
'name': 'test-find-route-table-by-name',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
@arm_template('route-table-and-vnet.json')
def test_detect_route_table_is_routing_to_correct_subnet(self):
p = self.load_policy({
'name': 'test-detect-route-table-is-routing-to-correct-subnet',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
),
'value': 'not-null'
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
@arm_template('route-table-and-vnet.json')
def test_detect_route_table_not_routing_to_incorrect_subnet(self):
p = self.load_policy({
'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)
),
'value': 'not-null'
}
]
})
resources = p.run()
self.assertEqual(len(resources), 0, "A route table is routing to a disallowed subnet")
@arm_template('route-table-and-vnet.json')
def test_detect_route_only_routes_to_specific_subnets(self):
p = self.load_policy({
'name': 'test-detect-route-only-routes-to-specific-subnets',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
),
'value': 'not-null'
},
{
'type': 'value',
'key': 'length(properties.subnets)',
'op': 'eq',
'value': 1
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
def _assert_only_route_table_in_resources(self, resources):
self.assertEqual(len(resources), 1, "Only one route table should be found")
route_table = resources[0]
self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),
"The wrong route table was found")
properties = route_table.get('properties')
self.assertIsNotNone(properties, "Missing properties")
subnets = properties.get('subnets')
self.assertIsNotNone(subnets, "Missing subnets")
self.assertEqual(1, len(subnets), "There should only be one subnet")
subnet = subnets[0]
self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), "Incorrect subnet")
| 35.426752 | 95 | 0.538655 | 4,920 | 0.884574 | 0 | 0 | 3,668 | 0.659475 | 0 | 0 | 1,900 | 0.341604 |
b98531b0567b9e4719006397ec461d3fa4999e4b | 11,730 | py | Python | proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py | pkthein/sparts_all_fam | ff162e4ea8c3919a197dc0cc13fde6b32da113c7 | [
"Apache-2.0"
] | 1 | 2019-04-03T18:31:36.000Z | 2019-04-03T18:31:36.000Z | proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py | pkthein/sparts_all_fam | ff162e4ea8c3919a197dc0cc13fde6b32da113c7 | [
"Apache-2.0"
] | null | null | null | proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py | pkthein/sparts_all_fam | ff162e4ea8c3919a197dc0cc13fde6b32da113c7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Intel Corporation
# Copyright 2017 Wind River
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
################################################################################
# LIBRARIES & DEPENDENCIES #
################################################################################
import hashlib
import logging
import json
from collections import OrderedDict
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.processor.handler import TransactionHandler
LOGGER = logging.getLogger(__name__)
################################################################################
# HANDLER OBJ #
################################################################################
class ArtifactTransactionHandler:
"""
Class for handling the Transaction Family : Artifact
Attributes:
namespace_prefix (str): The namespace prefix of the transaction family
"""
def __init__(self, namespace_prefix):
"""
Constructs the ArtifactTransactionHandler object.
Args:
namespace_prefix (str):
                The namespace prefix of the transaction family
"""
self._namespace_prefix = namespace_prefix
@property
def family_name(self):
"""
type: str
Returns the family name of the handler object.
"""
return "artifact"
@property
def family_versions(self):
"""
type: list of str
Returns the family version of the handler object.
"""
return ["1.0"]
@property
def encodings(self):
"""
type: list of str
Returns the encoding scheme used for the data for the handler object.
"""
return ["csv-utf8"]
@property
def namespaces(self):
"""
type: list of str
Returns the namespaces associating with the handler object.
"""
return [self._namespace_prefix]
################################################################################
# FUNCTIONS #
################################################################################
def apply(self, transaction, context):
"""
Applys the payload from transaction onto the state storage.
Args:
transaction (Transaction): The transaction pertaining the payload
context (State): The current state of the ledger
Returns:
type: State
The new state of the ledger, which includes the data from the
transaction, is returned to be stored on the state storage.
Raises:
InvalidTransaction:
* If deserialization for payload from transaction failed
* If "create" was called on non-unique uuid
* If "amend" was called on non-existing uuid
* If "Add..." were called on non-existing uuid
* If invalid operation was called
InternalError:
* If deserialization of State.data failed
"""
# Parsing required fields from transaction payload
try:
payload = json.loads(transaction.payload.decode())
artifact_id = payload["uuid"]
artifact_alias = payload["alias"]
artifact_name = payload["name"]
artifact_type = payload["content_type"]
artifact_checksum = payload["checksum"]
artifact_label = payload["label"]
artifact_openchain = payload["openchain"]
action = payload["action"]
prev = payload["prev_block"]
cur = payload["cur_block"]
timestamp = payload["timestamp"]
artifact_list = payload["artifact_list"]
uri_list = payload["uri_list"]
except ValueError:
raise InvalidTransaction("Invalid payload serialization")
# Soft sanity check and loading required data
validate_transaction(artifact_id, action)
data_address = make_artifact_address(self._namespace_prefix,
artifact_id)
state_entries = context.get_state([data_address])
# Hard sanity check before creating final payload for the state storage
if len(state_entries) != 0:
try:
stored_artifact = json.loads(state_entries[0].data.decode())
stored_artifact_id = stored_artifact["uuid"]
except ValueError:
raise InternalError("Failed to deserialize data.")
else:
stored_artifact_id = stored_artifact = None
if action == "create" and stored_artifact_id is not None:
raise InvalidTransaction("Invalid Action-artifact already exists.")
elif action == "create":
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp)
elif action == "amend" and stored_artifact_id is not None:
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp, artifact_list, uri_list)
elif action == "AddArtifact" or action == "AddURI":
if stored_artifact_id is None:
raise InvalidTransaction(
"Invalid Action-requires an existing artifact."
)
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp,
artifact_list, uri_list)
# Adding the final payload to the state storage
data = json.dumps(artifact).encode()
addresses = context.set_state({data_address:data})
return addresses
################################################################################
# HELPER FUNCTIONS #
################################################################################
def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,
artifact_checksum, artifact_label, artifact_openchain,
prev, cur, timestamp, artifact_list=[], uri_list=[]):
"""
Constructs the payload to be stored in the state storage.
Args:
        artifact_id (str): The uuid of the artifact
artifact_alias (str): The alias of the artifact
artifact_name (str): The name of the artifact
artifact_type (str): The type of the artifact
artifact_checksum (str): The checksum of the artifact
artifact_label (str): The label of the artifact
artifact_openchain (str): The openchain of the artifact
prev (str): The previous block id of the transaction (default "0")
cur (str): the current block id of the transaction
timestamp (str): The UTC time for when the transaction was submitted
artifact_list (list of dict):
The list of the artifact uuid associated with the artifact
(default [])
uri_list (list of dict):
The list of the uri associated with the artifact (default [])
Returns:
type: dict
        The dictionary containing all the parameters is created and returned
        to be stored on the state storage.
"""
return {
"uuid" : artifact_id,
"alias" : artifact_alias,
"name" : artifact_name,
"content_type" : artifact_type,
"checksum" : artifact_checksum,
"label" : artifact_label,
"openchain" : artifact_openchain,
"prev_block" : prev,
"cur_block" : cur,
"timestamp" : timestamp,
"artifact_list" : artifact_list,
"uri_list" : uri_list
}
def validate_transaction(artifact_id, action):
"""
Performs soft sanity check in order to improve runtime by eliminating the
obvious exception errors.
Args:
artifact_id (str): The uuid of the artifact
action (str): The command to be performed
Raises:
InvalidTransaction:
If the uuid or the action are not passed in or the
action is not a valid action.
"""
if not artifact_id:
raise InvalidTransaction("Artifact ID is required")
if not action:
raise InvalidTransaction("Action is required")
if action not in ("AddArtifact", "create", "AddURI", "amend"):
raise InvalidTransaction("Invalid action: {}".format(action))
def make_artifact_address(namespace_prefix, artifact_id):
"""
Creates an artifact address which will be used to recover the associated
UUID if the artifact already exists in the state storage; or, used as a key to
store the new data into the state storage.
Args:
namespace_prefix (str):
The prefix associating with the transaction family
artifact_id (str): The uuid of the artifact
Returns:
type: str
The address-to-be, which associates the uuid and the namespace prefix.
"""
return namespace_prefix + \
hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()[:64]
def _display(msg):
"""
Logs the message to the debug logger.
Args:
msg (str): The message that is to be logged into the debug logger
"""
n = msg.count("\n")
if n > 0:
msg = msg.split("\n")
length = max(len(line) for line in msg)
else:
length = len(msg)
msg = [msg]
LOGGER.debug("+" + (length + 2) * "-" + "+")
for line in msg:
LOGGER.debug("+ " + line.center(length) + " +")
LOGGER.debug("+" + (length + 2) * "-" + "+")
################################################################################
# #
################################################################################
| 39.897959 | 82 | 0.521313 | 5,899 | 0.502899 | 0 | 0 | 749 | 0.063853 | 0 | 0 | 6,596 | 0.562319 |
b9877d896f97460bc5a35787da6277925368bc9f | 764 | py | Python | ReviewsCollector.py | fsandx/moodybooks | 5c13fe43849e4fa861a163c74411e9f796518bc9 | [
"MIT"
] | null | null | null | ReviewsCollector.py | fsandx/moodybooks | 5c13fe43849e4fa861a163c74411e9f796518bc9 | [
"MIT"
] | null | null | null | ReviewsCollector.py | fsandx/moodybooks | 5c13fe43849e4fa861a163c74411e9f796518bc9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
STEP 2
Takes the list of urls in the json files and downloads the html files to local drive
Start with: scrapy runspider ReviewsCollector.py
"""
import scrapy
import json
class ReviewsCollector(scrapy.Spider):
    # Scrapy requires spiders to define a ``name``; this value is an assumption.
    name = "reviews_collector"
def start_requests(self):
with open("data/books.json") as f:
self.data = json.load(f)
for item in self.data:
if (item['url'] is not None):
yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse)
def parse(self, response):
filename = response.url.split("/")[-1] + '.html'
with open('data/reviews/' + filename, 'wb+') as f:
f.write(response.body) | 29.384615 | 124 | 0.611257 | 539 | 0.705497 | 311 | 0.407068 | 0 | 0 | 0 | 0 | 284 | 0.371728 |
b9887b38cf06939bc8dd710e9861e2366862482a | 3,120 | py | Python | firelight/interfaces/light.py | roshie548/firelight | 3a5af5e2a1e5784127baebcf1517ffddcaff4062 | [
"MIT"
] | 16 | 2021-11-29T03:05:31.000Z | 2022-01-19T05:32:45.000Z | firelight/interfaces/light.py | roshie548/firelight | 3a5af5e2a1e5784127baebcf1517ffddcaff4062 | [
"MIT"
] | null | null | null | firelight/interfaces/light.py | roshie548/firelight | 3a5af5e2a1e5784127baebcf1517ffddcaff4062 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from .color import Color
class LightSystem(ABC):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'set_transition_time')
and callable(subclass.set_transition_time)
and hasattr(subclass, 'discover_lights')
and callable(subclass.discover_lights)
and hasattr(subclass, 'set_color_all_lights')
and callable(subclass.set_color_all_lights))
@abstractmethod
def discover_lights(self):
"""Discover the lights and groups in this LightSystem."""
raise NotImplementedError
@abstractmethod
def set_transition_time(self, transition_time: int):
"""Set how long it takes in milliseconds for colors to transition."""
raise NotImplementedError
@abstractmethod
def set_color(self, color: Color):
"""Set the color of all the lights in the LightSystem."""
raise NotImplementedError
class LightGroup(ABC):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'turn_on')
and callable(subclass.turn_on)
and hasattr(subclass, 'turn_off')
and callable(subclass.turn_off)
and hasattr(subclass, 'set_transition_time')
and callable(subclass.set_transition_time)
and hasattr(subclass, 'set_color')
and callable(subclass.set_color))
@abstractmethod
def turn_on(self):
"""Turn on the lights in this group."""
raise NotImplementedError
@abstractmethod
def turn_off(self):
"""Turn off the lights in this group."""
raise NotImplementedError
@abstractmethod
def set_transition_time(self, transition_time: int):
"""Set how long it takes in milliseconds for colors to transition."""
raise NotImplementedError
@abstractmethod
def set_color(self, color: Color):
"""Set the color of this light."""
raise NotImplementedError
class LightDevice(ABC):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'turn_on')
and callable(subclass.turn_on)
and hasattr(subclass, 'turn_off')
and callable(subclass.turn_off)
and hasattr(subclass, 'set_transition_time')
and callable(subclass.set_transition_time)
and hasattr(subclass, 'set_color')
and callable(subclass.set_color))
@abstractmethod
def turn_on(self):
"""Turn on this light."""
raise NotImplementedError
@abstractmethod
def turn_off(self):
"""Turn off the light."""
raise NotImplementedError
@abstractmethod
def set_transition_time(self, transition_time: int):
"""Set how long it takes in milliseconds for colors to transition."""
raise NotImplementedError
@abstractmethod
def set_color(self, color: Color):
"""Set the color of this light."""
raise NotImplementedError
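# A minimal sketch of a concrete device (illustrative only; ``ConsoleLight`` is
# not part of the firelight package) showing how the abstract interface above
# would be satisfied:
class ConsoleLight(LightDevice):
    """Example device that only logs what it would do."""
    def __init__(self):
        self._transition_ms = 0
    def turn_on(self):
        print("light on")
    def turn_off(self):
        print("light off")
    def set_transition_time(self, transition_time: int):
        self._transition_ms = transition_time
    def set_color(self, color: Color):
        print("setting color to", color)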
| 32.842105 | 77 | 0.641026 | 3,050 | 0.977564 | 0 | 0 | 2,901 | 0.929808 | 0 | 0 | 680 | 0.217949 |
b98b6f0b6e5f35ef44fd272ec1f3a99b4d72acf0 | 1,293 | py | Python | PolymorphismPYTHON/Polypy.py | cadeng23/oop-cjgustafson | cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8 | [
"MIT"
] | null | null | null | PolymorphismPYTHON/Polypy.py | cadeng23/oop-cjgustafson | cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8 | [
"MIT"
] | null | null | null | PolymorphismPYTHON/Polypy.py | cadeng23/oop-cjgustafson | cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8 | [
"MIT"
] | null | null | null | import random
class Family:
    def __init__(self, first, last, hair):
        self.first = first
        self.last = last
        self.hair = hair
        self.eyes = "Grey"  # unknown until Apply_eyes() assigns a real color
def fullname(self):
return '{} {}'.format(self.first,self.last)
def eyefind(self):
temp = random.choice([1,2])
        #using the Punnett square in genetics we know that a donor
        #with blue eyes and one with brown makes it 50/50 odds
        #that the child's eyes will be brown or blue
if (temp == 1):
self.EYES = ("Brown")
else:
self.EYES = ("Blue")
return self.EYES
    def Apply_eyes(self):
        self.eyes = self.eyefind()
Daughter = Family('Ashley', 'Smith', 'Brown')
Son = Family('Kevin', 'Smith', 'Brown')
print(Daughter.eyes)
print(Son.eyes)
#with the kids being born it will define what color hair and eyes
# they may randomly get through inheritance
class Kids(Family):
pass
#Eyes are marked as Grey because they are unknown for now
# hair colors are brown because brown is the dominant hair color
Daughter = Kids('Danielle', 'Smith', 'Brown' )
Son = Kids('Kevin','Smith','Brown')
print(Daughter.eyes)
print(Son.eyes)
Daughter.Apply_eyes()
Son.Apply_eyes()
print(Daughter.eyes)
print(Son.eyes)
| 23.089286 | 66 | 0.618716 | 1,027 | 0.794277 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.381284 |
b98c3a1636cff18e5244db1f52b8e6e89e2c99b5 | 1,494 | py | Python | homeassistant/components/device_tracker/owntracks.py | evancohen/home-assistant | dafc0ced6b07025c03417d8e7a2c0133b4c622fc | [
"MIT"
] | 14 | 2015-11-10T07:57:43.000Z | 2021-08-29T13:45:26.000Z | homeassistant/components/device_tracker/owntracks.py | evancohen/home-assistant | dafc0ced6b07025c03417d8e7a2c0133b4c622fc | [
"MIT"
] | null | null | null | homeassistant/components/device_tracker/owntracks.py | evancohen/home-assistant | dafc0ced6b07025c03417d8e7a2c0133b4c622fc | [
"MIT"
] | 8 | 2015-11-14T16:40:41.000Z | 2020-02-17T19:48:08.000Z | """
homeassistant.components.device_tracker.owntracks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OwnTracks platform for the device tracker.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import homeassistant.components.mqtt as mqtt
DEPENDENCIES = ['mqtt']
LOCATION_TOPIC = 'owntracks/+/+'
def setup_scanner(hass, config, see):
""" Set up a OwnTracksks tracker. """
def owntracks_location_update(topic, payload, qos):
""" MQTT message received. """
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
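        # An illustrative message (values made up) on topic "owntracks/alice/phone":
        #   {"_type": "location", "lat": 52.52, "lon": 13.40, "acc": 12, "batt": 87}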
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
logging.getLogger(__name__).error(
'Unable to parse payload as JSON: %s', payload)
return
if not isinstance(data, dict) or data.get('_type') != 'location':
return
parts = topic.split('/')
kwargs = {
'dev_id': '{}_{}'.format(parts[1], parts[2]),
'host_name': parts[1],
'gps': (data['lat'], data['lon']),
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
see(**kwargs)
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
return True
| 27.666667 | 74 | 0.582999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.412985 |
b98c6a6e2a07073f4614093d6ae5d6469afd6835 | 48,027 | py | Python | src/models/end_to_end_event_coreference.py | luyaojie/E3C | 4b2f33da4629211fd6a3738077794f821c7f7c8b | [
"MIT"
] | 2 | 2022-02-20T15:13:11.000Z | 2022-03-22T03:47:21.000Z | src/models/end_to_end_event_coreference.py | luyaojie/E3C | 4b2f33da4629211fd6a3738077794f821c7f7c8b | [
"MIT"
] | null | null | null | src/models/end_to_end_event_coreference.py | luyaojie/E3C | 4b2f33da4629211fd6a3738077794f821c7f7c8b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by Roger on 2019-09-10
# Mostly by AllenNLP
import logging
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Pruner
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder
from allennlp.modules.similarity_functions import DotProductSimilarity
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import Average
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from src.metrics.event_coref_scores import EventCorefScores
from src.metrics.mention_f1 import TopSpanMentionTypeF1
from src.utils.cluster_decoding_utils import node_decode
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("end-to-end-event-coreference")
class End2EndEventCoreferenceResolver(Model):
"""
    This ``Model`` implements the coreference resolution model described in "End-to-end Neural
Coreference Resolution"
<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>
by Lee et al., 2017.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward: ``FeedForward``
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
max_span_width: ``int``
The maximum width of candidate spans.
spans_per_word: float, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: int, required.
For each mention which survives the pruning stage, we consider this many antecedents.
lexical_dropout: ``int``
The probability of dropping out dimensions of the embedded text.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
mention_feedforward: FeedForward,
antecedent_feedforward: FeedForward,
feature_size: int,
context_layer: Seq2SeqEncoder = None,
max_span_width: int = 1,
spans_per_word: float = 0.1,
max_antecedents: int = 50,
lexical_dropout: float = 0.2,
pretrain_ed: bool = False,
pretrain_coref: bool = False,
coref_loss_weight: float = 1.0,
bce_loss_weight: float = 1.0,
bce_pos_weight: float = None,
local_window_size: int = 10,
attention_type: str = 'dot',
decoding: str = 'type-guided',
type_threshold: float = -1.,
type_refine: bool = True,
type_match_in_eval: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer)
logger.info(vocab)
self._text_field_embedder = text_field_embedder
self._context_layer = context_layer
self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
self._event_scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))
)
self._pretrain_ed = pretrain_ed
self._pretrain_coref = pretrain_coref
self._mention_pruner = Pruner(self._event_scorer)
self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))
self._local_window_size = local_window_size
self._attention_type = attention_type
self._decoding = decoding
self._type_threshold = type_threshold
logger.info(vocab.get_token_from_index(0, "labels"))
if context_layer is not None:
endpoint_span_extractor_dim = context_layer.get_output_dim()
attentive_span_extractor_dim = text_field_embedder.get_output_dim()
self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size)
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim()
if self._local_window_size <= 0:
self._attention_layer = None
else:
if self._attention_type == 'dot':
similarity_function = DotProductSimilarity(scale_output=True)
num_head = 1
else:
raise NotImplementedError('Attention Type: %s' % self._attention_type)
self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
similarity_function=similarity_function,
combination='2',
num_attention_heads=num_head
)
else:
attentive_span_extractor_dim = text_field_embedder.get_output_dim()
if max_span_width > 1:
endpoint_span_extractor_dim = text_field_embedder.get_output_dim()
self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size)
else:
self._endpoint_span_extractor = None
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
if self._local_window_size <= 0:
self._attention_layer = None
else:
if self._attention_type == 'dot':
similarity_function = DotProductSimilarity(scale_output=True)
num_head = 1
else:
raise NotImplementedError('Attention Type: %s' % self._attention_type)
self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
similarity_function=similarity_function,
combination='2',
num_attention_heads=num_head
)
if self._endpoint_span_extractor is not None:
span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim()
else:
span_embedding_size = self._attentive_span_extractor.get_output_dim()
if type_refine:
self._type_refine_gate = torch.nn.Sequential(
TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)),
torch.nn.Sigmoid()
)
else:
self._type_refine_gate = None
# NIL for Unified Event
self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'),
embedding_dim=span_embedding_size)
self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2,
self._event_embedding.get_output_dim())
self._positive_label_size = vocab.get_vocab_size('labels') - 1
# 10 possible distance buckets.
self._num_distance_buckets = 10
self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
self._coref_loss_weight = coref_loss_weight
self._bce_loss_weight = bce_loss_weight
self._bce_pos_weight = bce_pos_weight
self._max_span_width = max_span_width
self._spans_per_word = spans_per_word
self._max_antecedents = max_antecedents
self._mention_f1_score = TopSpanMentionTypeF1()
self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval)
self._type_loss_metric = Average()
self._realis_loss_metric = Average()
self._coref_loss_metric = Average()
self._coref_label_metric = Average()
self._type_label_metric = Average()
self._nil_label_metric = Average()
if self._bce_pos_weight:
self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight))
else:
self._bce_loss = BCEWithLogitsLoss(reduction='none')
if lexical_dropout > 0:
self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
else:
self._lexical_dropout = lambda x: x
initializer(self)
def _get_event_embedding(self, span_mask):
"""
:param span_mask:
(batch, top_span_size, 1)
:return:
            (batch, positive_label_size, span_embedding_size)
"""
event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1
event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1)
event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)])
event_embeddings = self._event_embedding(event_indices)
event_embeddings = event_embeddings.reshape(event_embeddings.size(0),
event_embeddings.size(1) * event_embeddings.size(2))
event_embeddings = self._event_embedding_map.forward(event_embeddings)
event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0),
event_embeddings.size(0),
event_embeddings.size(1),
)
return event_embeddings
def _get_type_antecedent_labels(self, top_event_type_labels):
"""
:param top_event_type_labels:
(batch, top_span_size, 1)
:return:
(batch, top_span_size, positive_label_size)
"""
event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'),
device=util.get_device_of(top_event_type_labels))
top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0),
top_event_type_labels.size(1),
event_indices.size(0)])
type_antecedent_labels = (top_event_type_labels == event_indices).float()
return type_antecedent_labels
def _type_refine_embedding(self, top_embeddings, event_embeddings):
# (batch, top_span_size, emb_size) bmm
event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2))
shape = [event_prob.size(0), event_prob.size(1), 1]
dummy_scores = event_prob.new_zeros(*shape)
event_prob = torch.cat([dummy_scores, event_prob], -1)
event_prob = torch.softmax(event_prob, -1)
event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings
refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1))
top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep
return top_embeddings
def _local_attention(self, raw_contextualized_embeddings, text_mask):
device = util.get_device_of(raw_contextualized_embeddings)
if device < 0:
device = 'cpu'
attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device)
# attention_mask = attention_mask - torch.eye(text_mask.size(1),
# device=util.get_device_of(contextualized_embeddings))
new_attention_mask = text_mask[:, :, None] * attention_mask
new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size),
-self._local_window_size)
new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings,
new_attention_mask)
return new_contextualized_embeddings
@overrides
def forward(self, # type: ignore
text: Dict[str, torch.LongTensor],
spans: torch.IntTensor,
coref_labels: torch.IntTensor = None,
event_type_labels: torch.IntTensor = None,
realis_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
text : ``Dict[str, torch.LongTensor]``, required.
The output of a ``TextField`` representing the text of
the document.
spans : ``torch.IntTensor``, required.
A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
indices into the text of the document.
coref_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the cluster ids
of each span, or -1 for those which do not appear in any clusters.
event_type_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the event label of the specific span.
realis_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the realis label of the specific span.
metadata : ``List[Dict[str, Any]]``, optional (default = None).
A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys
from this dictionary, which respectively have the original text and the annotated gold coreference
clusters for that instance.
Returns
-------
An output dictionary consisting of:
top_spans : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
the start and end word indices of the top spans that survived the pruning stage.
antecedent_indices : ``torch.IntTensor``
A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
the index (with respect to top_spans) of the possible antecedents the model considered.
predicted_antecedents : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
was no predicted link.
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised.
"""
# Shape: (batch_size, document_length, embedding_size)
text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
document_length = text_embeddings.size(1)
num_spans = spans.size(1)
# Shape: (batch_size, document_length)
text_mask = util.get_text_field_mask(text).float()
# Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
# SpanFields return -1 when they are used as padding. As we do
# some comparisons based on span widths when we attend over the
# span representations that we generate from these indices, we
# need them to be <= 0. This is only relevant in edge cases where
# the number of spans we consider after the pruning stage is >= the
# total number of spans, because in this case, it is possible we might
# consider a masked span.
# Shape: (batch_size, num_spans, 2)
spans = F.relu(spans.float()).long()
if self._context_layer:
# Shape: (batch_size, document_length, encoding_dim)
raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size)
attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
# span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
else:
raw_contextualized_embeddings = text_embeddings
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
span_embeddings_list = list()
attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)
span_embeddings_list += [attended_span_embeddings]
if self._endpoint_span_extractor is not None:
# Shape: (batch_size, num_spans, embedding_size)
endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
span_embeddings_list += [endpoint_span_embeddings]
span_embeddings = torch.cat(span_embeddings_list, -1)
# event_scores = self._event_classifier.forward(span_embeddings)
# Shape: (batch_size, num_spans, num_event_realis_label)
# Shape: (batch_size, num_spans, num_event_realis_label)
# event_realis_scores = self._event_realis_classifier.forward(span_embeddings)
# Prune based on mention scores.
num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))
(top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,
span_mask,
num_spans_to_keep_according_doc_len,
)
event_embeddings = self._get_event_embedding(span_mask)
top_mask = top_mask.unsqueeze(-1)
# Shape: (batch_size * num_spans_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select spans for each element in the batch.
# This reformats the indices to take into account their
# index into the batch. We precompute this here to make
# the multiple calls to util.batched_index_select below more efficient.
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)
# Compute final predictions for which spans to consider as mentions.
# Shape: (batch_size, num_spans_to_keep, 2)
top_spans = util.batched_index_select(spans,
top_indices,
flat_top_span_indices)
# Compute indices for antecedent spans to consider.
max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)
# top_span_embeddings = top_span_embeddings.detach()
# top_span_mention_scores = top_span_mention_scores.detach()
# Now that we have our variables in terms of num_spans_to_keep, we need to
# compare span pairs to decide each span's antecedent. Each span can only
# have prior spans as antecedents, and we only consider up to max_antecedents
# prior spans. So the first thing we do is construct a matrix mapping a span's
# index to the indices of its allowed antecedents. Note that this is independent
# of the batch dimension - it's just a function of the span's position in
# top_spans. The spans are in document order, so we can just use the relative
# index of the spans to know which other spans are allowed antecedents.
# Once we have this matrix, we reformat our variables again to get embeddings
# for all valid antecedents for each span. This gives us variables with shapes
# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
# we can use to make coreference decisions between valid span pairs.
# Shapes:
# (num_spans_to_keep, max_antecedents),
# (1, max_antecedents),
# (1, num_spans_to_keep, max_antecedents)
valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \
_generate_valid_antecedents(num_spans_to_keep_according_doc_len,
max_antecedents,
util.get_device_of(text_mask))
if self._type_refine_gate is not None:
top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)
# Select tensors relating to the antecedent spans.
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,
valid_antecedent_indices)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,
valid_antecedent_indices).squeeze(-1)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(
event_embeddings,
candidate_antecedent_embeddings)
# Compute antecedent scores.
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,
candidate_antecedent_embeddings,
valid_antecedent_offsets)
# (batch_size, event_type_size, 1)
event_type_prior_scores = self._event_scorer(event_embeddings)
# (batch_size, num_spans_to_keep, event_type_size)
event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(
candidate_antecedent_mention_scores.size(0),
candidate_antecedent_mention_scores.size(1),
-1)
# (batch_size, num_spans_to_keep, event_type_size + max_antecedents)
candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,
candidate_antecedent_mention_scores],
-1)
# Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)
coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
top_scores,
candidate_antecedent_mention_scores,
valid_antecedent_log_mask)
# We now have, for each span which survived the pruning stage,
# a predicted antecedent. This implies a clustering if we group
# mentions which refer to each other in a chain.
# Shape: (batch_size, num_spans_to_keep)
_, predicted_antecedents = coreference_scores.max(2)
# Subtract one here because index 0 is the "no antecedent" class,
# so this makes the indices line up with actual spans if the prediction
# is greater than -1.
predicted_antecedents -= 1
output_dict = {"top_spans": top_spans,
"antecedent_indices": valid_antecedent_indices,
"predicted_antecedents": predicted_antecedents,
"coreference_scores": coreference_scores,
}
if coref_labels is not None and event_type_labels is not None:
pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)
type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)
# Find the gold labels for the spans which we kept.
pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),
top_indices,
flat_top_span_indices)
antecedent_labels = util.flattened_index_select(pruned_gold_labels,
valid_antecedent_indices).squeeze(-1)
antecedent_labels += valid_antecedent_log_mask.long()
# Compute labels.
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
type_antecedent_labels,
antecedent_labels)
bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),
(event_type_labels > 0).float()) * span_mask
bce_loss = bce_loss.sum() * self._bce_loss_weight
# Now, compute the loss using the negative marginal log-likelihood.
# This is equal to the log of the sum of the probabilities of all antecedent predictions
# that would be consistent with the data, in the sense that we are minimising, for a
# given span, the negative marginal log likelihood of all antecedents which are in the
# same gold cluster as the span we are currently considering. Each span i predicts a
# single antecedent j, but there might be several prior mentions k in the same
# coreference cluster that would be valid antecedents. Our loss is the sum of the
# probability assigned to all valid antecedents. This is a valid objective for
# clustering as we don't mind which antecedent is predicted, so long as they are in
# the same coreference cluster.
if self._pretrain_ed:
# All antecedent mask is 0
top_mask = top_mask.expand_as(coreference_scores).clone()
top_mask[:, :, self._positive_label_size + 2:] = 0
coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)
correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight
output_dict["loss"] = coref_loss + bce_loss
decoded_result = self.decode(output_dict)
pred_label_spans_list = decoded_result['pred_label_spans']
gold_label_spans_list = [m['gold_label_spans'] for m in metadata]
self._mention_f1_score(pred_label_spans_list,
gold_label_spans_list,
)
self._conll_coref_scores(decoded_result['clusters'],
metadata,
pred_label_spans_list,
gold_label_spans_list)
self._type_loss_metric(bce_loss.item())
self._coref_loss_metric(negative_marginal_log_likelihood.item())
else:
self._coref_loss_metric(0.)
if metadata is not None:
output_dict["document"] = [x["original_text"] for x in metadata]
output_dict["offset"] = [x["token_offset"] for x in metadata]
output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata]
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
Returns
-------
The same output dictionary, but with an additional ``clusters`` key:
clusters : ``List[List[List[Tuple[int, int]]]]``
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
return node_decode(output_dict,
self.vocab, decoding_algorithm=self._decoding,
positive_label_size=self._positive_label_size,
type_threshold=self._type_threshold)
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
mention_result = self._mention_f1_score.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
return {"c_p": coref_precision,
"c_r": coref_recall,
"c_f1": coref_f1,
"m_p": mention_result['precision'],
"m_r": mention_result['recall'],
"m_f1": mention_result['f1-score'],
"nil": self._nil_label_metric.get_metric(reset),
"type": self._type_label_metric.get_metric(reset),
"coref": self._coref_label_metric.get_metric(reset),
"t_l": self._type_loss_metric.get_metric(reset),
"c_l": self._coref_loss_metric.get_metric(reset),
"a_f1": (mention_result['f1-score'] + coref_f1) / 2.}
@staticmethod
def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor):
"""
event_embeddings: ``torch.FloatTensor``, required.
Embedding representations of the event types. Has shape
(batch_size, event_type_size, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
return:
(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
"""
event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
event_embeddings.size(1),
antecedent_embeddings.size(3),))
return torch.cat([event_embeddings, antecedent_embeddings], 2)
def _compute_span_pair_embeddings(self,
top_span_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor,
antecedent_offsets: torch.FloatTensor):
"""
Computes an embedding representation of pairs of spans for the pairwise scoring function
to consider. This includes both the original span representations, the element-wise
similarity of the span representations, and an embedding representation of the distance
between the two spans.
        Parameters
        ----------
top_span_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the top spans. Has shape
(batch_size, num_spans_to_keep, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size).
antecedent_offsets : ``torch.IntTensor``, required.
The offsets between each top span and its antecedent spans in terms
of spans we are considering. Has shape (1, max_antecedents).
Returns
-------
span_pair_embeddings : ``torch.FloatTensor``
Embedding representation of the pair of spans to consider. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size)
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
# Shape: (1, max_antecedents)
bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)
# (1, event_type)
label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size))
# Shape: (1, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = self._distance_embedding(
torch.cat([bucket_values, label_bucket_values], 1)
)
# Shape: (1, 1, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)
expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
antecedent_embeddings.size(2),
antecedent_distance_embeddings.size(-1))
# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
span_pair_embeddings = torch.cat([target_embeddings,
antecedent_embeddings,
antecedent_embeddings * target_embeddings,
antecedent_distance_embeddings], -1)
return span_pair_embeddings
def _compute_antecedent_gold_labels(self,
top_span_labels: torch.IntTensor,
type_antecedent_labels: torch.IntTensor,
antecedent_labels: torch.IntTensor):
"""
Generates a binary indicator for every pair of spans. This label is one if and
only if the pair of spans belong to the same cluster. The labels are augmented
with a dummy antecedent at the zeroth position, which represents the prediction
that a span does not have any antecedent.
Parameters
----------
top_span_labels : ``torch.IntTensor``, required.
The cluster id label for every span. The id is arbitrary,
as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
antecedent_labels : ``torch.IntTensor``, required.
The cluster id label for every antecedent span. The id is arbitrary,
as we just care about the clustering. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
Returns
-------
pairwise_labels_with_dummy_label : ``torch.FloatTensor``
A binary tensor representing whether a given pair of spans belong to
the same cluster in the gold clustering.
Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
# print(top_span_labels)
# print(antecedent_labels)
target_labels = top_span_labels.expand_as(antecedent_labels)
same_cluster_indicator = (target_labels == antecedent_labels).float()
non_dummy_indicator = (target_labels >= 0).float()
pairwise_labels = same_cluster_indicator * non_dummy_indicator
if self._pretrain_ed:
pairwise_labels = pairwise_labels * 0
else:
# for pairwise_labels without type_antecedent_labels
pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float()
type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator)
self._coref_label_metric(torch.sum(pairwise_labels).item())
self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item())
self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item())
# print(pairwise_labels)
#
# # Shape: (batch_size, num_spans_to_keep, 1)
# dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1)
pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1)
return pairwise_labels_with_dummy_label
def _compute_coreference_scores(self,
pairwise_embeddings: torch.FloatTensor,
top_span_mention_scores: torch.FloatTensor,
antecedent_mention_scores: torch.FloatTensor,
antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:
"""
Computes scores for every pair of spans. Additionally, a dummy label is included,
representing the decision that the span is not coreferent with anything. For the dummy
label, the score is always zero. For the true antecedent spans, the score consists of
the pairwise antecedent score and the unary mention scores for the span and its
antecedent. The factoring allows the model to blame many of the absent links on bad
spans, enabling the pruning strategy used in the forward pass.
Parameters
----------
pairwise_embeddings: ``torch.FloatTensor``, required.
Embedding representations of pairs of spans. Has shape
(batch_size, num_spans_to_keep, max_antecedents, encoding_dim)
top_span_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every span. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every antecedent. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_log_mask: ``torch.FloatTensor``, required.
The log of the mask for valid antecedents.
Returns
-------
coreference_scores: ``torch.FloatTensor``
A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),
            representing the unnormalised score for each (span, antecedent) pair
we considered.
"""
antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0),
antecedent_log_mask.size(1),
self._positive_label_size)),
antecedent_log_mask],
-1)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_scores = self._antecedent_scorer(
self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)
antecedent_scores += top_span_mention_scores + antecedent_mention_scores
antecedent_scores += antecedent_log_mask
# Shape: (batch_size, num_spans_to_keep, 1)
shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
dummy_scores = antecedent_scores.new_zeros(*shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
return coreference_scores
def _generate_valid_antecedents(num_spans_to_keep: int,
max_antecedents: int,
device: int) -> Tuple[torch.IntTensor,
torch.IntTensor,
torch.FloatTensor]:
"""
This method generates possible antecedents per span which survived the pruning
stage. This procedure is `generic across the batch`. The reason this is the case is
that each span in a batch can be coreferent with any previous span, but here we
are computing the possible `indices` of these spans. So, regardless of the batch,
the 1st span _cannot_ have any antecedents, because there are none to select from.
Similarly, each element can only predict previous spans, so this returns a matrix
of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to
(i - 1) - j if j <= i, or zero otherwise.
Parameters
----------
num_spans_to_keep : ``int``, required.
The number of spans that were kept while pruning.
max_antecedents : ``int``, required.
The maximum number of antecedent spans to consider for every span.
device: ``int``, required.
The CUDA device to use.
Returns
-------
valid_antecedent_indices : ``torch.IntTensor``
The indices of every antecedent to consider with respect to the top k spans.
Has shape ``(num_spans_to_keep, max_antecedents)``.
valid_antecedent_offsets : ``torch.IntTensor``
The distance between the span and each of its antecedents in terms of the number
of considered spans (i.e not the word distance between the spans).
Has shape ``(1, max_antecedents)``.
valid_antecedent_log_mask : ``torch.FloatTensor``
The logged mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
Has shape ``(1, num_spans_to_keep, max_antecedents)``.
"""
# Shape: (num_spans_to_keep, 1)
target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
# Shape: (1, max_antecedents)
valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
# This is a broadcasted subtraction.
# Shape: (num_spans_to_keep, max_antecedents)
raw_antecedent_indices = target_indices - valid_antecedent_offsets
# In our matrix of indices, the upper triangular part will be negative
# because the offsets will be > the target indices. We want to mask these,
# because these are exactly the indices which we don't want to predict, per span.
# We're generating a logspace mask here because we will eventually create a
# distribution over these indices, so we need the 0 elements of the mask to be -inf
# in order to not mess up the normalisation of the distribution.
# Shape: (1, num_spans_to_keep, max_antecedents)
valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()
# Shape: (num_spans_to_keep, max_antecedents)
valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()
return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
| 54.514188 | 134 | 0.629271 | 43,414 | 0.90395 | 0 | 0 | 43,462 | 0.904949 | 0 | 0 | 19,498 | 0.40598 |
b98ccbb0c859fdccad6b30924e5845122d497aa5 | 1,964 | py | Python | week2/7litersProblem.py | vietanhtran2710/ArtificialIntelligenceHomework | f4da761016d67477b50856cadf1e2560230d3f79 | [
"MIT"
] | 3 | 2021-09-20T08:32:23.000Z | 2021-09-25T08:11:48.000Z | week2/7litersProblem.py | vietanhtran2710/ArtificialIntelligenceHomework | f4da761016d67477b50856cadf1e2560230d3f79 | [
"MIT"
] | null | null | null | week2/7litersProblem.py | vietanhtran2710/ArtificialIntelligenceHomework | f4da761016d67477b50856cadf1e2560230d3f79 | [
"MIT"
] | null | null | null | """
Given 3 bottles of capacities 3, 5, and 9 liters,
count number of all possible solutions to get 7 liters
"""
current_path = [[0, 0, 0]]
CAPACITIES = (3, 5, 9)
solutions_count = 0
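# A state is a list [bottle_3L, bottle_5L, bottle_9L]; for example, filling the
# 9-liter bottle and then pouring it into the 5-liter one takes [0, 0, 0] to
# [0, 0, 9] and then to [0, 5, 4].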
def move_to_new_state(current_state):
global solutions_count, current_path
if 7 in current_state:
solutions_count += 1
else:
# Empty bottle
for i in range(3):
if current_state[i] != 0:
new_state = list(current_state)
new_state[i] = 0
if new_state not in current_path:
current_path.append(new_state)
move_to_new_state(new_state)
current_path.pop()
# Fill bottle
for i in range(3):
if current_state[i] != CAPACITIES[i]:
new_state = list(current_state)
new_state[i] = CAPACITIES[i]
if new_state not in current_path:
current_path.append(new_state)
move_to_new_state(new_state)
current_path.pop()
# Pour from one bottle to another
for i in range(3):
for j in range(3):
if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:
new_state = list(current_state)
liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])
new_state[j] += liters_change
new_state[i] -= liters_change
if new_state not in current_path:
current_path.append(new_state)
move_to_new_state(new_state)
current_path.pop()
if __name__ == "__main__":
try:
current_state = [0, 0, 0]
move_to_new_state(current_state)
print(solutions_count)
except KeyboardInterrupt:
print(solutions_count)
# Result: at least 44900799 solutions
| 35.709091 | 91 | 0.548371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.11558 |
b98d02f62eca1818cb1fb297d1c8644dd35ff288 | 8,263 | py | Python | st2common/st2common/bootstrap/rulesregistrar.py | avezraj/st2 | 519c7f6819e52fb289c440bb7d1df7b558bb9ed7 | [
"Apache-2.0"
] | null | null | null | st2common/st2common/bootstrap/rulesregistrar.py | avezraj/st2 | 519c7f6819e52fb289c440bb7d1df7b558bb9ed7 | [
"Apache-2.0"
] | null | null | null | st2common/st2common/bootstrap/rulesregistrar.py | avezraj/st2 | 519c7f6819e52fb289c440bb7d1df7b558bb9ed7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.rule import RuleAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.rule import Rule
from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count
from st2common.exceptions.db import coditationDBObjectNotFoundError
import st2common.content.utils as content_utils
__all__ = [
'RulesRegistrar',
'register_rules'
]
LOG = logging.getLogger(__name__)
class RulesRegistrar(ResourceRegistrar):
ALLOWED_EXTENSIONS = ALLOWED_EXTS
def register_from_packs(self, base_dirs):
"""
:return: Number of rules registered.
:rtype: ``int``
"""
# Register packs first
self.register_packs(base_dirs=base_dirs)
registered_count = 0
content = self._pack_loader.get_content(base_dirs=base_dirs,
content_type='rules')
for pack, rules_dir in six.iteritems(content):
if not rules_dir:
LOG.debug('Pack %s does not contain rules.', pack)
continue
try:
LOG.debug('Registering rules from pack: %s', pack)
rules = self._get_rules_from_pack(rules_dir)
count = self._register_rules_from_pack(pack, rules)
registered_count += count
except Exception as e:
if self._fail_on_failure:
raise e
LOG.exception('Failed registering all rules from pack: %s', rules_dir)
return registered_count
def register_from_pack(self, pack_dir):
"""
Register all the rules from the provided pack.
:return: Number of rules registered.
:rtype: ``int``
"""
pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
_, pack = os.path.split(pack_dir)
rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
content_type='rules')
# Register pack first
self.register_pack(pack_name=pack, pack_dir=pack_dir)
registered_count = 0
if not rules_dir:
return registered_count
LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)
try:
rules = self._get_rules_from_pack(rules_dir=rules_dir)
registered_count = self._register_rules_from_pack(pack=pack, rules=rules)
except Exception as e:
if self._fail_on_failure:
raise e
LOG.exception('Failed registering all rules from pack: %s', rules_dir)
return registered_count
def _get_rules_from_pack(self, rules_dir):
return self.get_resources_from_pack(resources_dir=rules_dir)
def _register_rules_from_pack(self, pack, rules):
registered_count = 0
# TODO: Refactor this monstrosity
for rule in rules:
LOG.debug('Loading rule from %s.', rule)
try:
content = self._meta_loader.load(rule)
pack_field = content.get('pack', None)
if not pack_field:
content['pack'] = pack
pack_field = pack
if pack_field != pack:
raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
(pack, pack_field))
metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,
file_path=rule,
use_pack_cache=True)
content['metadata_file'] = metadata_file
rule_api = RuleAPI(**content)
rule_api.validate()
rule_db = RuleAPI.to_model(rule_api)
# Migration from rule without pack to rule with pack.
# There might be a rule with same name but in pack `default`
# generated in migration script. In this case, we want to
# delete so we don't have duplicates.
if pack_field != DEFAULT_PACK_NAME:
try:
rule_ref = ResourceReference.to_string_reference(name=content['name'],
pack=DEFAULT_PACK_NAME)
LOG.debug('Looking for rule %s in pack %s', content['name'],
DEFAULT_PACK_NAME)
existing = Rule.get_by_ref(rule_ref)
LOG.debug('Existing = %s', existing)
if existing:
LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)
Rule.delete(existing)
except:
LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)
try:
rule_ref = ResourceReference.to_string_reference(name=content['name'],
pack=content['pack'])
existing = Rule.get_by_ref(rule_ref)
if existing:
rule_db.id = existing.id
LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)
except coditationDBObjectNotFoundError:
LOG.debug('Rule %s not found. Creating new one.', rule)
try:
rule_db = Rule.add_or_update(rule_db)
increment_trigger_ref_count(rule_api=rule_api)
extra = {'rule_db': rule_db}
LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)
except Exception:
LOG.exception('Failed to create rule %s.', rule_api.name)
# If there was an existing rule then the ref count was updated in
# to_model so it needs to be adjusted down here. Also, update could
# lead to removal of a Trigger so now is a good time for book-keeping.
if existing:
cleanup_trigger_db_for_rule(existing)
except Exception as e:
if self._fail_on_failure:
msg = ('Failed to register rule "%s" from pack "%s": %s' % (rule, pack,
six.text_type(e)))
raise ValueError(msg)
LOG.exception('Failed registering rule from %s.', rule)
else:
registered_count += 1
return registered_count
def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
fail_on_failure=False):
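    """
    Register rules from all packs found under ``packs_base_paths``, or only from
    the single pack at ``pack_dir`` when it is provided.

    :return: Number of rules registered.
    :rtype: ``int``
    """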
if packs_base_paths:
assert isinstance(packs_base_paths, list)
if not packs_base_paths:
packs_base_paths = content_utils.get_packs_base_paths()
registrar = RulesRegistrar(use_pack_cache=use_pack_cache,
fail_on_failure=fail_on_failure)
if pack_dir:
result = registrar.register_from_pack(pack_dir=pack_dir)
else:
result = registrar.register_from_packs(base_dirs=packs_base_paths)
return result
# --- Source file: sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
# --- From repository: azure-sdk-for-python (MIT license)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._cost_management_client_enums import *
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.tags = None
class Alert(Resource):
"""An individual alert.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:param definition: defines the type of alert.
:type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition
:param description: Alert description.
:type description: str
:param source: Source of alert. Possible values include: "Preset", "User".
:type source: str or ~azure.mgmt.costmanagement.models.AlertSource
:param details: Alert details.
:type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails
:param cost_entity_id: related budget.
:type cost_entity_id: str
:param status: alert status. Possible values include: "None", "Active", "Overridden",
"Resolved", "Dismissed".
:type status: str or ~azure.mgmt.costmanagement.models.AlertStatus
:param creation_time: dateTime in which alert was created.
:type creation_time: str
:param close_time: dateTime in which alert was closed.
:type close_time: str
:param modification_time: dateTime in which alert was last modified.
:type modification_time: str
:param status_modification_user_name:
:type status_modification_user_name: str
:param status_modification_time: dateTime in which the alert status was last modified.
:type status_modification_time: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},
'description': {'key': 'properties.description', 'type': 'str'},
'source': {'key': 'properties.source', 'type': 'str'},
'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},
'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'str'},
'close_time': {'key': 'properties.closeTime', 'type': 'str'},
'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},
'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},
'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},
}
def __init__(
self,
*,
definition: Optional["AlertPropertiesDefinition"] = None,
description: Optional[str] = None,
source: Optional[Union[str, "AlertSource"]] = None,
details: Optional["AlertPropertiesDetails"] = None,
cost_entity_id: Optional[str] = None,
status: Optional[Union[str, "AlertStatus"]] = None,
creation_time: Optional[str] = None,
close_time: Optional[str] = None,
modification_time: Optional[str] = None,
status_modification_user_name: Optional[str] = None,
status_modification_time: Optional[str] = None,
**kwargs
):
super(Alert, self).__init__(**kwargs)
self.definition = definition
self.description = description
self.source = source
self.details = details
self.cost_entity_id = cost_entity_id
self.status = status
self.creation_time = creation_time
self.close_time = close_time
self.modification_time = modification_time
self.status_modification_user_name = status_modification_user_name
self.status_modification_time = status_modification_time
class AlertPropertiesDefinition(msrest.serialization.Model):
"""defines the type of alert.
:param type: type of alert. Possible values include: "Budget", "Invoice", "Credit", "Quota",
"General", "xCloud", "BudgetForecast".
:type type: str or ~azure.mgmt.costmanagement.models.AlertType
:param category: Alert category. Possible values include: "Cost", "Usage", "Billing", "System".
:type category: str or ~azure.mgmt.costmanagement.models.AlertCategory
:param criteria: Criteria that triggered alert. Possible values include:
"CostThresholdExceeded", "UsageThresholdExceeded", "CreditThresholdApproaching",
"CreditThresholdReached", "QuotaThresholdApproaching", "QuotaThresholdReached",
"MultiCurrency", "ForecastCostThresholdExceeded", "ForecastUsageThresholdExceeded",
"InvoiceDueDateApproaching", "InvoiceDueDateReached", "CrossCloudNewDataAvailable",
"CrossCloudCollectionError", "GeneralThresholdError".
:type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'criteria': {'key': 'criteria', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "AlertType"]] = None,
category: Optional[Union[str, "AlertCategory"]] = None,
criteria: Optional[Union[str, "AlertCriteria"]] = None,
**kwargs
):
super(AlertPropertiesDefinition, self).__init__(**kwargs)
self.type = type
self.category = category
self.criteria = criteria
class AlertPropertiesDetails(msrest.serialization.Model):
"""Alert details.
:param time_grain_type: Type of timegrain cadence. Possible values include: "None", "Monthly",
"Quarterly", "Annually", "BillingMonth", "BillingQuarter", "BillingAnnual".
:type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType
:param period_start_date: datetime of periodStartDate.
:type period_start_date: str
:param triggered_by: notificationId that triggered this alert.
:type triggered_by: str
:param resource_group_filter: array of resourceGroups to filter by.
:type resource_group_filter: list[object]
:param resource_filter: array of resources to filter by.
:type resource_filter: list[object]
:param meter_filter: array of meters to filter by.
:type meter_filter: list[object]
:param tag_filter: tags to filter by.
:type tag_filter: object
:param threshold: notification threshold percentage as a decimal which activated this alert.
:type threshold: float
:param operator: operator used to compare currentSpend with amount. Possible values include:
"None", "EqualTo", "GreaterThan", "GreaterThanOrEqualTo", "LessThan", "LessThanOrEqualTo".
:type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator
:param amount: budget threshold amount.
:type amount: float
:param unit: unit of currency being used.
:type unit: str
:param current_spend: current spend.
:type current_spend: float
:param contact_emails: list of emails to contact.
:type contact_emails: list[str]
:param contact_groups: list of action groups to broadcast to.
:type contact_groups: list[str]
:param contact_roles: list of contact roles.
:type contact_roles: list[str]
:param overriding_alert: overriding alert.
:type overriding_alert: str
"""
_attribute_map = {
'time_grain_type': {'key': 'timeGrainType', 'type': 'str'},
'period_start_date': {'key': 'periodStartDate', 'type': 'str'},
'triggered_by': {'key': 'triggeredBy', 'type': 'str'},
'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'},
'resource_filter': {'key': 'resourceFilter', 'type': '[object]'},
'meter_filter': {'key': 'meterFilter', 'type': '[object]'},
'tag_filter': {'key': 'tagFilter', 'type': 'object'},
'threshold': {'key': 'threshold', 'type': 'float'},
'operator': {'key': 'operator', 'type': 'str'},
'amount': {'key': 'amount', 'type': 'float'},
'unit': {'key': 'unit', 'type': 'str'},
'current_spend': {'key': 'currentSpend', 'type': 'float'},
'contact_emails': {'key': 'contactEmails', 'type': '[str]'},
'contact_groups': {'key': 'contactGroups', 'type': '[str]'},
'contact_roles': {'key': 'contactRoles', 'type': '[str]'},
'overriding_alert': {'key': 'overridingAlert', 'type': 'str'},
}
def __init__(
self,
*,
time_grain_type: Optional[Union[str, "AlertTimeGrainType"]] = None,
period_start_date: Optional[str] = None,
triggered_by: Optional[str] = None,
resource_group_filter: Optional[List[object]] = None,
resource_filter: Optional[List[object]] = None,
meter_filter: Optional[List[object]] = None,
tag_filter: Optional[object] = None,
threshold: Optional[float] = None,
operator: Optional[Union[str, "AlertOperator"]] = None,
amount: Optional[float] = None,
unit: Optional[str] = None,
current_spend: Optional[float] = None,
contact_emails: Optional[List[str]] = None,
contact_groups: Optional[List[str]] = None,
contact_roles: Optional[List[str]] = None,
overriding_alert: Optional[str] = None,
**kwargs
):
super(AlertPropertiesDetails, self).__init__(**kwargs)
self.time_grain_type = time_grain_type
self.period_start_date = period_start_date
self.triggered_by = triggered_by
self.resource_group_filter = resource_group_filter
self.resource_filter = resource_filter
self.meter_filter = meter_filter
self.tag_filter = tag_filter
self.threshold = threshold
self.operator = operator
self.amount = amount
self.unit = unit
self.current_spend = current_spend
self.contact_emails = contact_emails
self.contact_groups = contact_groups
self.contact_roles = contact_roles
self.overriding_alert = overriding_alert
class AlertsResult(msrest.serialization.Model):
"""Result of alerts.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of alerts.
:vartype value: list[~azure.mgmt.costmanagement.models.Alert]
:ivar next_link: URL to get the next set of alerts results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Alert]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AlertsResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class CommonExportProperties(msrest.serialization.Model):
"""The common properties of the export.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param format: The format of the export being delivered. Currently only 'Csv' is supported.
Possible values include: "Csv".
:type format: str or ~azure.mgmt.costmanagement.models.FormatType
:param delivery_info: Required. Has delivery information for the export.
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
:param definition: Required. Has the definition for the export.
:type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
:param run_history: If requested, has the most recent execution history for the export.
:type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
next execution time.
:vartype next_run_time_estimate: ~datetime.datetime
"""
_validation = {
'delivery_info': {'required': True},
'definition': {'required': True},
'next_run_time_estimate': {'readonly': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},
'definition': {'key': 'definition', 'type': 'ExportDefinition'},
'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},
'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},
}
def __init__(
self,
*,
delivery_info: "ExportDeliveryInfo",
definition: "ExportDefinition",
format: Optional[Union[str, "FormatType"]] = None,
run_history: Optional["ExportExecutionListResult"] = None,
**kwargs
):
super(CommonExportProperties, self).__init__(**kwargs)
self.format = format
self.delivery_info = delivery_info
self.definition = definition
self.run_history = run_history
self.next_run_time_estimate = None
class Dimension(Resource):
"""Dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar description: Dimension description.
:vartype description: str
:ivar filter_enabled: Filter enabled.
:vartype filter_enabled: bool
:ivar grouping_enabled: Grouping enabled.
:vartype grouping_enabled: bool
:param data:
:type data: list[str]
:ivar total: Total number of data for the dimension.
:vartype total: int
:ivar category: Dimension category.
:vartype category: str
:ivar usage_start: Usage start.
:vartype usage_start: ~datetime.datetime
:ivar usage_end: Usage end.
:vartype usage_end: ~datetime.datetime
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
'description': {'readonly': True},
'filter_enabled': {'readonly': True},
'grouping_enabled': {'readonly': True},
'total': {'readonly': True},
'category': {'readonly': True},
'usage_start': {'readonly': True},
'usage_end': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'description': {'key': 'properties.description', 'type': 'str'},
'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'},
'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'},
'data': {'key': 'properties.data', 'type': '[str]'},
'total': {'key': 'properties.total', 'type': 'int'},
'category': {'key': 'properties.category', 'type': 'str'},
'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'},
'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'},
'next_link': {'key': 'properties.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
data: Optional[List[str]] = None,
**kwargs
):
super(Dimension, self).__init__(**kwargs)
self.description = None
self.filter_enabled = None
self.grouping_enabled = None
self.data = data
self.total = None
self.category = None
self.usage_start = None
self.usage_end = None
self.next_link = None
class DimensionsListResult(msrest.serialization.Model):
"""Result of listing dimensions. It contains a list of available dimensions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of dimensions.
:vartype value: list[~azure.mgmt.costmanagement.models.Dimension]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Dimension]'},
}
def __init__(
self,
**kwargs
):
super(DimensionsListResult, self).__init__(**kwargs)
self.value = None
class DismissAlertPayload(msrest.serialization.Model):
"""The request payload to update an alert.
:param definition: defines the type of alert.
:type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition
:param description: Alert description.
:type description: str
:param source: Source of alert. Possible values include: "Preset", "User".
:type source: str or ~azure.mgmt.costmanagement.models.AlertSource
:param details: Alert details.
:type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails
:param cost_entity_id: related budget.
:type cost_entity_id: str
:param status: alert status. Possible values include: "None", "Active", "Overridden",
"Resolved", "Dismissed".
:type status: str or ~azure.mgmt.costmanagement.models.AlertStatus
:param creation_time: dateTime in which alert was created.
:type creation_time: str
:param close_time: dateTime in which alert was closed.
:type close_time: str
:param modification_time: dateTime in which alert was last modified.
:type modification_time: str
:param status_modification_user_name:
:type status_modification_user_name: str
:param status_modification_time: dateTime in which the alert status was last modified.
:type status_modification_time: str
"""
_attribute_map = {
'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},
'description': {'key': 'properties.description', 'type': 'str'},
'source': {'key': 'properties.source', 'type': 'str'},
'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},
'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'str'},
'close_time': {'key': 'properties.closeTime', 'type': 'str'},
'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},
'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},
'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},
}
def __init__(
self,
*,
definition: Optional["AlertPropertiesDefinition"] = None,
description: Optional[str] = None,
source: Optional[Union[str, "AlertSource"]] = None,
details: Optional["AlertPropertiesDetails"] = None,
cost_entity_id: Optional[str] = None,
status: Optional[Union[str, "AlertStatus"]] = None,
creation_time: Optional[str] = None,
close_time: Optional[str] = None,
modification_time: Optional[str] = None,
status_modification_user_name: Optional[str] = None,
status_modification_time: Optional[str] = None,
**kwargs
):
super(DismissAlertPayload, self).__init__(**kwargs)
self.definition = definition
self.description = description
self.source = source
self.details = details
self.cost_entity_id = cost_entity_id
self.status = status
self.creation_time = creation_time
self.close_time = close_time
self.modification_time = modification_time
self.status_modification_user_name = status_modification_user_name
self.status_modification_time = status_modification_time
class ErrorDetails(msrest.serialization.Model):
"""The details of the error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code.
:vartype code: str
:ivar message: Error message indicating why the operation failed.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.message = None
class ErrorResponse(msrest.serialization.Model):
"""Error response indicates that the service is not able to process the incoming request. The reason is provided in the error message.
Some Error responses:
    * 429 TooManyRequests - Request is throttled. Retry after waiting for the time specified in the "x-ms-ratelimit-microsoft.consumption-retry-after" header.
    * 503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the time specified in the "Retry-After" header.
:param error: The details of the error.
:type error: ~azure.mgmt.costmanagement.models.ErrorDetails
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetails'},
}
def __init__(
self,
*,
error: Optional["ErrorDetails"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ProxyResource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
}
def __init__(
self,
*,
e_tag: Optional[str] = None,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.e_tag = e_tag
class Export(ProxyResource):
"""An export resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
:param format: The format of the export being delivered. Currently only 'Csv' is supported.
Possible values include: "Csv".
:type format: str or ~azure.mgmt.costmanagement.models.FormatType
:param delivery_info: Has delivery information for the export.
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
:param definition: Has the definition for the export.
:type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
:param run_history: If requested, has the most recent execution history for the export.
:type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
next execution time.
:vartype next_run_time_estimate: ~datetime.datetime
:param schedule: Has schedule information for the export.
:type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'next_run_time_estimate': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'format': {'key': 'properties.format', 'type': 'str'},
'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'},
'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'},
'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'},
'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'},
'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'},
}
def __init__(
self,
*,
e_tag: Optional[str] = None,
format: Optional[Union[str, "FormatType"]] = None,
delivery_info: Optional["ExportDeliveryInfo"] = None,
definition: Optional["ExportDefinition"] = None,
run_history: Optional["ExportExecutionListResult"] = None,
schedule: Optional["ExportSchedule"] = None,
**kwargs
):
super(Export, self).__init__(e_tag=e_tag, **kwargs)
self.format = format
self.delivery_info = delivery_info
self.definition = definition
self.run_history = run_history
self.next_run_time_estimate = None
self.schedule = schedule
class ExportDataset(msrest.serialization.Model):
"""The definition for data in the export.
:param granularity: The granularity of rows in the export. Currently only 'Daily' is supported.
Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: The export dataset configuration.
:type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration
"""
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["ExportDatasetConfiguration"] = None,
**kwargs
):
super(ExportDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
class ExportDatasetConfiguration(msrest.serialization.Model):
"""The export dataset configuration. Allows columns to be selected for the export. If not provided then the export will include all available columns.
:param columns: Array of column names to be included in the export. If not provided then the
export will include all available columns. The available columns can vary by customer channel
(see examples).
:type columns: list[str]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[str]'},
}
def __init__(
self,
*,
columns: Optional[List[str]] = None,
**kwargs
):
super(ExportDatasetConfiguration, self).__init__(**kwargs)
self.columns = columns
class ExportDefinition(msrest.serialization.Model):
"""The definition of an export.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the export. Note that 'Usage' is equivalent to 'ActualCost'
and is applicable to exports that do not yet provide data for charges or amortization for
service reservations. Possible values include: "Usage", "ActualCost", "AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ExportType
:param timeframe: Required. The time frame for pulling data for the export. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType
:param time_period: Has time period for pulling data for the export.
:type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod
:param data_set: The definition for data in the export.
:type data_set: ~azure.mgmt.costmanagement.models.ExportDataset
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'},
'data_set': {'key': 'dataSet', 'type': 'ExportDataset'},
}
def __init__(
self,
*,
type: Union[str, "ExportType"],
timeframe: Union[str, "TimeframeType"],
time_period: Optional["ExportTimePeriod"] = None,
data_set: Optional["ExportDataset"] = None,
**kwargs
):
super(ExportDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.data_set = data_set
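# Illustrative sketch (not part of the generated model code): a month-to-date
# actual-cost export definition with daily granularity, using the enum string
# values documented above.
#
#   definition = ExportDefinition(
#       type="ActualCost",
#       timeframe="MonthToDate",
#       data_set=ExportDataset(granularity="Daily"),
#   )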
class ExportDeliveryDestination(msrest.serialization.Model):
"""The destination information for the delivery of the export. To allow access to a storage account, you must register the account's subscription with the Microsoft.CostManagementExports resource provider. This is required once per subscription. When creating an export in the Azure portal, it is done automatically, however API users need to register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services .
All required parameters must be populated in order to send to Azure.
:param resource_id: Required. The resource id of the storage account where exports will be
delivered.
:type resource_id: str
:param container: Required. The name of the container where exports will be uploaded.
:type container: str
:param root_folder_path: The name of the directory where exports will be uploaded.
:type root_folder_path: str
"""
_validation = {
'resource_id': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'container': {'key': 'container', 'type': 'str'},
'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: str,
container: str,
root_folder_path: Optional[str] = None,
**kwargs
):
super(ExportDeliveryDestination, self).__init__(**kwargs)
self.resource_id = resource_id
self.container = container
self.root_folder_path = root_folder_path
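# Illustrative sketch: a delivery destination for an export. The subscription id,
# resource group and storage account below are placeholders, not real resources.
#
#   destination = ExportDeliveryDestination(
#       resource_id=(
#           "/subscriptions/00000000-0000-0000-0000-000000000000"
#           "/resourceGroups/example-rg/providers/Microsoft.Storage"
#           "/storageAccounts/examplestorage"
#       ),
#       container="exports",
#       root_folder_path="costmanagement",
#   )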
class ExportDeliveryInfo(msrest.serialization.Model):
"""The delivery information associated with a export.
All required parameters must be populated in order to send to Azure.
:param destination: Required. Has destination for the export being delivered.
:type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination
"""
_validation = {
'destination': {'required': True},
}
_attribute_map = {
'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'},
}
def __init__(
self,
*,
destination: "ExportDeliveryDestination",
**kwargs
):
super(ExportDeliveryInfo, self).__init__(**kwargs)
self.destination = destination
class ExportExecution(Resource):
"""An export execution.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:param execution_type: The type of the export execution. Possible values include: "OnDemand",
"Scheduled".
:type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType
:param status: The last known status of the export execution. Possible values include:
"Queued", "InProgress", "Completed", "Failed", "Timeout", "NewDataNotAvailable",
"DataNotAvailable".
:type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus
:param submitted_by: The identifier for the entity that executed the export. For OnDemand
executions it is the user email. For scheduled executions it is 'System'.
:type submitted_by: str
:param submitted_time: The time when export was queued to be executed.
:type submitted_time: ~datetime.datetime
:param processing_start_time: The time when export was picked up to be executed.
:type processing_start_time: ~datetime.datetime
:param processing_end_time: The time when the export execution finished.
:type processing_end_time: ~datetime.datetime
:param file_name: The name of the exported file.
:type file_name: str
:param run_settings: The export settings that were in effect for this execution.
:type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties
:param error: The details of any error.
:type error: ~azure.mgmt.costmanagement.models.ErrorDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'execution_type': {'key': 'properties.executionType', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'},
'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'},
'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'},
'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'},
'file_name': {'key': 'properties.fileName', 'type': 'str'},
'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'},
'error': {'key': 'properties.error', 'type': 'ErrorDetails'},
}
def __init__(
self,
*,
execution_type: Optional[Union[str, "ExecutionType"]] = None,
status: Optional[Union[str, "ExecutionStatus"]] = None,
submitted_by: Optional[str] = None,
submitted_time: Optional[datetime.datetime] = None,
processing_start_time: Optional[datetime.datetime] = None,
processing_end_time: Optional[datetime.datetime] = None,
file_name: Optional[str] = None,
run_settings: Optional["CommonExportProperties"] = None,
error: Optional["ErrorDetails"] = None,
**kwargs
):
super(ExportExecution, self).__init__(**kwargs)
self.execution_type = execution_type
self.status = status
self.submitted_by = submitted_by
self.submitted_time = submitted_time
self.processing_start_time = processing_start_time
self.processing_end_time = processing_end_time
self.file_name = file_name
self.run_settings = run_settings
self.error = error
class ExportExecutionListResult(msrest.serialization.Model):
"""Result of listing the execution history of an export.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A list of export executions.
:vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ExportExecution]'},
}
def __init__(
self,
**kwargs
):
super(ExportExecutionListResult, self).__init__(**kwargs)
self.value = None
class ExportListResult(msrest.serialization.Model):
"""Result of listing exports. It contains a list of available exports in the scope provided.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of exports.
:vartype value: list[~azure.mgmt.costmanagement.models.Export]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Export]'},
}
def __init__(
self,
**kwargs
):
super(ExportListResult, self).__init__(**kwargs)
self.value = None
class ExportProperties(CommonExportProperties):
"""The properties of the export.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param format: The format of the export being delivered. Currently only 'Csv' is supported.
Possible values include: "Csv".
:type format: str or ~azure.mgmt.costmanagement.models.FormatType
:param delivery_info: Required. Has delivery information for the export.
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
:param definition: Required. Has the definition for the export.
:type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
:param run_history: If requested, has the most recent execution history for the export.
:type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
next execution time.
:vartype next_run_time_estimate: ~datetime.datetime
:param schedule: Has schedule information for the export.
:type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
"""
_validation = {
'delivery_info': {'required': True},
'definition': {'required': True},
'next_run_time_estimate': {'readonly': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},
'definition': {'key': 'definition', 'type': 'ExportDefinition'},
'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},
'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},
'schedule': {'key': 'schedule', 'type': 'ExportSchedule'},
}
def __init__(
self,
*,
delivery_info: "ExportDeliveryInfo",
definition: "ExportDefinition",
format: Optional[Union[str, "FormatType"]] = None,
run_history: Optional["ExportExecutionListResult"] = None,
schedule: Optional["ExportSchedule"] = None,
**kwargs
):
super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs)
self.schedule = schedule
class ExportRecurrencePeriod(msrest.serialization.Model):
"""The start and end date for recurrence schedule.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date of recurrence.
:type from_property: ~datetime.datetime
:param to: The end date of recurrence.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: Optional[datetime.datetime] = None,
**kwargs
):
super(ExportRecurrencePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ExportSchedule(msrest.serialization.Model):
"""The schedule associated with the export.
All required parameters must be populated in order to send to Azure.
:param status: The status of the export's schedule. If 'Inactive', the export's schedule is
paused. Possible values include: "Active", "Inactive".
:type status: str or ~azure.mgmt.costmanagement.models.StatusType
:param recurrence: Required. The schedule recurrence. Possible values include: "Daily",
"Weekly", "Monthly", "Annually".
:type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType
:param recurrence_period: Has start and end date of the recurrence. The start date must be in
     the future. If present, the end date must be greater than the start date.
:type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod
"""
_validation = {
'recurrence': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'recurrence': {'key': 'recurrence', 'type': 'str'},
'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'},
}
def __init__(
self,
*,
recurrence: Union[str, "RecurrenceType"],
status: Optional[Union[str, "StatusType"]] = None,
recurrence_period: Optional["ExportRecurrencePeriod"] = None,
**kwargs
):
super(ExportSchedule, self).__init__(**kwargs)
self.status = status
self.recurrence = recurrence
self.recurrence_period = recurrence_period
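# Illustrative sketch: composing a complete export from the models defined above
# (reusing the 'destination' and 'definition' sketches shown earlier). All names
# and dates are placeholders; per the ExportSchedule documentation the recurrence
# period must start in the future.
#
#   export = Export(
#       format="Csv",
#       delivery_info=ExportDeliveryInfo(destination=destination),
#       definition=definition,
#       schedule=ExportSchedule(
#           status="Active",
#           recurrence="Daily",
#           recurrence_period=ExportRecurrencePeriod(
#               from_property=datetime.datetime(2030, 1, 1),
#               to=datetime.datetime(2030, 12, 31),
#           ),
#       ),
#   )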
class ExportTimePeriod(msrest.serialization.Model):
"""The date range for data in the export. This should only be specified with timeFrame set to 'Custom'. The maximum date range is 3 months.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date for export data.
:type from_property: ~datetime.datetime
:param to: Required. The end date for export data.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(ExportTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ForecastDataset(msrest.serialization.Model):
"""The definition of data present in the forecast.
:param granularity: The granularity of rows in the forecast. Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: Has configuration information for the data in the export. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
:param aggregation: Dictionary of aggregation expression to use in the forecast. The key of
each item in the dictionary is the alias for the aggregated column. forecast can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
:param filter: Has filter expression to use in the forecast.
:type filter: ~azure.mgmt.costmanagement.models.QueryFilter
"""
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},
'filter': {'key': 'filter', 'type': 'QueryFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["QueryDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
filter: Optional["QueryFilter"] = None,
**kwargs
):
super(ForecastDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.filter = filter
class ForecastDefinition(msrest.serialization.Model):
"""The definition of a forecast.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the forecast. Possible values include: "Usage",
"ActualCost", "AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ForecastType
:param timeframe: Required. The time frame for pulling data for the forecast. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType
:param time_period: Has time period for pulling data for the forecast.
:type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
:param dataset: Has definition for data in this forecast.
:type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset
:param include_actual_cost: a boolean determining if actualCost will be included.
:type include_actual_cost: bool
:param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included.
:type include_fresh_partial_cost: bool
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'ForecastDataset'},
'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'},
'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'},
}
def __init__(
self,
*,
type: Union[str, "ForecastType"],
timeframe: Union[str, "ForecastTimeframeType"],
time_period: Optional["QueryTimePeriod"] = None,
dataset: Optional["ForecastDataset"] = None,
include_actual_cost: Optional[bool] = None,
include_fresh_partial_cost: Optional[bool] = None,
**kwargs
):
super(ForecastDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
self.include_actual_cost = include_actual_cost
self.include_fresh_partial_cost = include_fresh_partial_cost
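# Illustrative sketch: a month-to-date actual-cost forecast that also asks for
# actual and fresh partial cost rows alongside the forecasted values.
#
#   forecast = ForecastDefinition(
#       type="ActualCost",
#       timeframe="MonthToDate",
#       include_actual_cost=True,
#       include_fresh_partial_cost=True,
#   )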
class KpiProperties(msrest.serialization.Model):
"""Each KPI must contain a 'type' and 'enabled' key.
:param type: KPI type (Forecast, Budget). Possible values include: "Forecast", "Budget".
:type type: str or ~azure.mgmt.costmanagement.models.KpiType
:param id: ID of resource related to metric (budget).
:type id: str
    :param enabled: Whether to show the KPI in the UI.
:type enabled: bool
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
type: Optional[Union[str, "KpiType"]] = None,
id: Optional[str] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(KpiProperties, self).__init__(**kwargs)
self.type = type
self.id = id
self.enabled = enabled
class Operation(msrest.serialization.Model):
"""A Cost management REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{operation}.
:vartype name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.costmanagement.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft.CostManagement.
:vartype provider: str
:ivar resource: Resource on which the operation is performed: Dimensions, Query.
:vartype resource: str
:ivar operation: Operation type: Read, write, delete, etc.
:vartype operation: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
class OperationListResult(msrest.serialization.Model):
"""Result of listing cost management operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of cost management operations supported by the Microsoft.CostManagement
resource provider.
:vartype value: list[~azure.mgmt.costmanagement.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PivotProperties(msrest.serialization.Model):
"""Each pivot must contain a 'type' and 'name'.
:param type: Data type to show in view. Possible values include: "Dimension", "TagKey".
:type type: str or ~azure.mgmt.costmanagement.models.PivotType
:param name: Data field to show in view.
:type name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "PivotType"]] = None,
name: Optional[str] = None,
**kwargs
):
super(PivotProperties, self).__init__(**kwargs)
self.type = type
self.name = name
class QueryAggregation(msrest.serialization.Model):
"""The aggregation expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to aggregate.
:type name: str
:param function: Required. The name of the aggregation function to use. Possible values
include: "Sum".
:type function: str or ~azure.mgmt.costmanagement.models.FunctionType
"""
_validation = {
'name': {'required': True},
'function': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'function': {'key': 'function', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
function: Union[str, "FunctionType"],
**kwargs
):
super(QueryAggregation, self).__init__(**kwargs)
self.name = name
self.function = function
class QueryColumn(msrest.serialization.Model):
"""QueryColumn.
:param name: The name of column.
:type name: str
:param type: The type of column.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(QueryColumn, self).__init__(**kwargs)
self.name = name
self.type = type
class QueryComparisonExpression(msrest.serialization.Model):
"""The comparison expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to use in comparison.
:type name: str
:param operator: Required. The operator to use for comparison. Possible values include: "In",
"Contains".
:type operator: str or ~azure.mgmt.costmanagement.models.OperatorType
:param values: Required. Array of values to use for comparison.
:type values: list[str]
"""
_validation = {
'name': {'required': True},
'operator': {'required': True},
'values': {'required': True, 'min_items': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'operator': {'key': 'operator', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
}
def __init__(
self,
*,
name: str,
operator: Union[str, "OperatorType"],
values: List[str],
**kwargs
):
super(QueryComparisonExpression, self).__init__(**kwargs)
self.name = name
self.operator = operator
self.values = values
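# Illustrative sketch: restrict a query to two resource groups. The dimension
# name "ResourceGroupName" is an assumption about the dimensions exposed by the
# service, not something defined in this file.
#
#   rg_filter = QueryComparisonExpression(
#       name="ResourceGroupName",
#       operator="In",
#       values=["example-rg-1", "example-rg-2"],
#   )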
class QueryDataset(msrest.serialization.Model):
"""The definition of data present in the query.
:param granularity: The granularity of rows in the query. Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: Has configuration information for the data in the export. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
:param aggregation: Dictionary of aggregation expression to use in the query. The key of each
item in the dictionary is the alias for the aggregated column. Query can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
:param grouping: Array of group by expression to use in the query. Query can have up to 2 group
by clauses.
:type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping]
:param filter: Has filter expression to use in the query.
:type filter: ~azure.mgmt.costmanagement.models.QueryFilter
"""
_validation = {
'grouping': {'max_items': 2, 'min_items': 0},
}
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},
'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'},
'filter': {'key': 'filter', 'type': 'QueryFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["QueryDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
grouping: Optional[List["QueryGrouping"]] = None,
filter: Optional["QueryFilter"] = None,
**kwargs
):
super(QueryDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.grouping = grouping
self.filter = filter
class QueryDatasetConfiguration(msrest.serialization.Model):
"""The configuration of dataset in the query.
:param columns: Array of column names to be included in the query. Any valid query column name
     is allowed. If not provided, the query includes all columns.
:type columns: list[str]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[str]'},
}
def __init__(
self,
*,
columns: Optional[List[str]] = None,
**kwargs
):
super(QueryDatasetConfiguration, self).__init__(**kwargs)
self.columns = columns
class QueryDefinition(msrest.serialization.Model):
"""The definition of a query.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the query. Possible values include: "Usage", "ActualCost",
"AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ExportType
:param timeframe: Required. The time frame for pulling data for the query. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType
:param time_period: Has time period for pulling data for the query.
:type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
:param dataset: Has definition for data in this query.
:type dataset: ~azure.mgmt.costmanagement.models.QueryDataset
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'QueryDataset'},
}
def __init__(
self,
*,
type: Union[str, "ExportType"],
timeframe: Union[str, "TimeframeType"],
time_period: Optional["QueryTimePeriod"] = None,
dataset: Optional["QueryDataset"] = None,
**kwargs
):
super(QueryDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
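# Illustrative sketch: a month-to-date actual-cost query, aggregated daily and
# grouped by resource group. The column name "PreTaxCost" and the dimension
# "ResourceGroupName" are assumptions about what the service exposes.
#
#   query = QueryDefinition(
#       type="ActualCost",
#       timeframe="MonthToDate",
#       dataset=QueryDataset(
#           granularity="Daily",
#           aggregation={"totalCost": QueryAggregation(name="PreTaxCost", function="Sum")},
#           grouping=[QueryGrouping(type="Dimension", name="ResourceGroupName")],
#       ),
#   )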
class QueryFilter(msrest.serialization.Model):
"""The filter expression to be used in the export.
:param and_property: The logical "AND" expression. Must have at least 2 items.
:type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter]
:param or_property: The logical "OR" expression. Must have at least 2 items.
:type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter]
:param not_property: The logical "NOT" expression.
:type not_property: ~azure.mgmt.costmanagement.models.QueryFilter
:param dimension: Has comparison expression for a dimension.
:type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression
:param tag: Has comparison expression for a tag.
:type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression
"""
_validation = {
'and_property': {'min_items': 2},
'or_property': {'min_items': 2},
}
_attribute_map = {
'and_property': {'key': 'and', 'type': '[QueryFilter]'},
'or_property': {'key': 'or', 'type': '[QueryFilter]'},
'not_property': {'key': 'not', 'type': 'QueryFilter'},
'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'},
'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'},
}
def __init__(
self,
*,
and_property: Optional[List["QueryFilter"]] = None,
or_property: Optional[List["QueryFilter"]] = None,
not_property: Optional["QueryFilter"] = None,
dimension: Optional["QueryComparisonExpression"] = None,
tag: Optional["QueryComparisonExpression"] = None,
**kwargs
):
super(QueryFilter, self).__init__(**kwargs)
self.and_property = and_property
self.or_property = or_property
self.not_property = not_property
self.dimension = dimension
self.tag = tag
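# Illustrative sketch: combine a dimension filter and a tag filter with a logical
# AND (which requires at least two sub-filters). The tag key "environment" is a
# placeholder.
#
#   combined = QueryFilter(
#       and_property=[
#           QueryFilter(dimension=QueryComparisonExpression(
#               name="ResourceGroupName", operator="In", values=["example-rg"])),
#           QueryFilter(tag=QueryComparisonExpression(
#               name="environment", operator="In", values=["production"])),
#       ],
#   )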
class QueryGrouping(msrest.serialization.Model):
"""The group by expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param type: Required. Has type of the column to group. Possible values include: "Tag",
"Dimension".
:type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType
:param name: Required. The name of the column to group.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Union[str, "QueryColumnType"],
name: str,
**kwargs
):
super(QueryGrouping, self).__init__(**kwargs)
self.type = type
self.name = name
class QueryResult(Resource):
"""Result of query. It contains all columns listed under groupings and aggregation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:param next_link: The link (url) to the next page of results.
:type next_link: str
:param columns: Array of columns.
:type columns: list[~azure.mgmt.costmanagement.models.QueryColumn]
:param rows: Array of rows.
:type rows: list[list[object]]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'next_link': {'key': 'properties.nextLink', 'type': 'str'},
'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'},
'rows': {'key': 'properties.rows', 'type': '[[object]]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
columns: Optional[List["QueryColumn"]] = None,
rows: Optional[List[List[object]]] = None,
**kwargs
):
super(QueryResult, self).__init__(**kwargs)
self.next_link = next_link
self.columns = columns
self.rows = rows
class QueryTimePeriod(msrest.serialization.Model):
"""The start and end date for pulling data for the query.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date to pull data from.
:type from_property: ~datetime.datetime
:param to: Required. The end date to pull data to.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(QueryTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ReportConfigAggregation(msrest.serialization.Model):
"""The aggregation expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to aggregate.
:type name: str
:param function: Required. The name of the aggregation function to use. Possible values
include: "Sum".
:type function: str or ~azure.mgmt.costmanagement.models.FunctionType
"""
_validation = {
'name': {'required': True},
'function': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'function': {'key': 'function', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
function: Union[str, "FunctionType"],
**kwargs
):
super(ReportConfigAggregation, self).__init__(**kwargs)
self.name = name
self.function = function
class ReportConfigComparisonExpression(msrest.serialization.Model):
"""The comparison expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to use in comparison.
:type name: str
:param operator: Required. The operator to use for comparison. Possible values include: "In",
"Contains".
:type operator: str or ~azure.mgmt.costmanagement.models.OperatorType
:param values: Required. Array of values to use for comparison.
:type values: list[str]
"""
_validation = {
'name': {'required': True},
'operator': {'required': True},
'values': {'required': True, 'min_items': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'operator': {'key': 'operator', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
}
def __init__(
self,
*,
name: str,
operator: Union[str, "OperatorType"],
values: List[str],
**kwargs
):
super(ReportConfigComparisonExpression, self).__init__(**kwargs)
self.name = name
self.operator = operator
self.values = values
class ReportConfigDataset(msrest.serialization.Model):
"""The definition of data present in the report.
:param granularity: The granularity of rows in the report. Possible values include: "Daily",
"Monthly".
:type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType
:param configuration: Has configuration information for the data in the report. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration
:param aggregation: Dictionary of aggregation expression to use in the report. The key of each
item in the dictionary is the alias for the aggregated column. Report can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]
:param grouping: Array of group by expression to use in the report. Report can have up to 2
group by clauses.
:type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]
:param sorting: Array of order by expression to use in the report.
:type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]
:param filter: Has filter expression to use in the report.
:type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter
"""
_validation = {
'grouping': {'max_items': 2, 'min_items': 0},
}
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},
'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},
'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},
'filter': {'key': 'filter', 'type': 'ReportConfigFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "ReportGranularityType"]] = None,
configuration: Optional["ReportConfigDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None,
grouping: Optional[List["ReportConfigGrouping"]] = None,
sorting: Optional[List["ReportConfigSorting"]] = None,
filter: Optional["ReportConfigFilter"] = None,
**kwargs
):
super(ReportConfigDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.grouping = grouping
self.sorting = sorting
self.filter = filter
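# Example (illustrative; the column and dimension names are placeholders): a daily
# report summing a cost column under the alias "totalCost" and grouped by one
# dimension, which stays within the two-clause limits noted above.
#
#   ReportConfigDataset(
#       granularity="Daily",
#       aggregation={"totalCost": ReportConfigAggregation(name="PreTaxCost",
#                                                         function="Sum")},
#       grouping=[ReportConfigGrouping(type="Dimension", name="ResourceGroup")],
#   )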
class ReportConfigDatasetAutoGenerated(msrest.serialization.Model):
"""The definition of data present in the report.
:param granularity: The granularity of rows in the report. Possible values include: "Daily",
"Monthly".
:type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType
:param configuration: Has configuration information for the data in the report. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration
:param aggregation: Dictionary of aggregation expression to use in the report. The key of each
item in the dictionary is the alias for the aggregated column. Report can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]
:param grouping: Array of group by expression to use in the report. Report can have up to 2
group by clauses.
:type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]
:param sorting: Array of order by expression to use in the report.
:type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]
:param filter: Has filter expression to use in the report.
:type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated
"""
_validation = {
'grouping': {'max_items': 2, 'min_items': 0},
}
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},
'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},
'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},
'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "ReportGranularityType"]] = None,
configuration: Optional["ReportConfigDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None,
grouping: Optional[List["ReportConfigGrouping"]] = None,
sorting: Optional[List["ReportConfigSorting"]] = None,
filter: Optional["ReportConfigFilterAutoGenerated"] = None,
**kwargs
):
super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.grouping = grouping
self.sorting = sorting
self.filter = filter
class ReportConfigDatasetConfiguration(msrest.serialization.Model):
"""The configuration of dataset in the report.
:param columns: Array of column names to be included in the report. Any valid report column
name is allowed. If not provided, then report includes all columns.
:type columns: list[str]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[str]'},
}
def __init__(
self,
*,
columns: Optional[List[str]] = None,
**kwargs
):
super(ReportConfigDatasetConfiguration, self).__init__(**kwargs)
self.columns = columns
class ReportConfigDefinition(msrest.serialization.Model):
"""The definition of a report config.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the report. Usage represents actual usage, forecast
represents forecasted data and UsageAndForecast represents both usage and forecasted data.
Actual usage and forecasted data can be differentiated based on dates. Possible values include:
"Usage".
:type type: str or ~azure.mgmt.costmanagement.models.ReportType
:param timeframe: Required. The time frame for pulling data for the report. If custom, then a
specific time period must be provided. Possible values include: "WeekToDate", "MonthToDate",
"YearToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
:param time_period: Has time period for pulling data for the report.
:type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
:param dataset: Has definition for data in this report config.
:type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'},
}
def __init__(
self,
*,
type: Union[str, "ReportType"],
timeframe: Union[str, "ReportTimeframeType"],
time_period: Optional["ReportConfigTimePeriod"] = None,
dataset: Optional["ReportConfigDatasetAutoGenerated"] = None,
**kwargs
):
super(ReportConfigDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
class ReportConfigFilter(msrest.serialization.Model):
"""The filter expression to be used in the report.
:param and_property: The logical "AND" expression. Must have at least 2 items.
:type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]
:param or_property: The logical "OR" expression. Must have at least 2 items.
:type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]
:param not_property: The logical "NOT" expression.
:type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter
:param dimension: Has comparison expression for a dimension.
:type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
:param tag: Has comparison expression for a tag.
:type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
"""
_validation = {
'and_property': {'min_items': 2},
'or_property': {'min_items': 2},
}
_attribute_map = {
'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'},
'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'},
'not_property': {'key': 'not', 'type': 'ReportConfigFilter'},
'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},
'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},
}
def __init__(
self,
*,
and_property: Optional[List["ReportConfigFilter"]] = None,
or_property: Optional[List["ReportConfigFilter"]] = None,
not_property: Optional["ReportConfigFilter"] = None,
dimension: Optional["ReportConfigComparisonExpression"] = None,
tag: Optional["ReportConfigComparisonExpression"] = None,
**kwargs
):
super(ReportConfigFilter, self).__init__(**kwargs)
self.and_property = and_property
self.or_property = or_property
self.not_property = not_property
self.dimension = dimension
self.tag = tag
class ReportConfigFilterAutoGenerated(msrest.serialization.Model):
"""The filter expression to be used in the report.
:param and_property: The logical "AND" expression. Must have at least 2 items.
:type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]
:param or_property: The logical "OR" expression. Must have at least 2 items.
:type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]
:param not_property: The logical "NOT" expression.
:type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated
:param dimension: Has comparison expression for a dimension.
:type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
:param tag: Has comparison expression for a tag.
:type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
"""
_validation = {
'and_property': {'min_items': 2},
'or_property': {'min_items': 2},
}
_attribute_map = {
'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'},
'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'},
'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'},
'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},
'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},
}
def __init__(
self,
*,
and_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None,
or_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None,
not_property: Optional["ReportConfigFilterAutoGenerated"] = None,
dimension: Optional["ReportConfigComparisonExpression"] = None,
tag: Optional["ReportConfigComparisonExpression"] = None,
**kwargs
):
super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs)
self.and_property = and_property
self.or_property = or_property
self.not_property = not_property
self.dimension = dimension
self.tag = tag
class ReportConfigGrouping(msrest.serialization.Model):
"""The group by expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param type: Required. Has type of the column to group. Possible values include: "Tag",
"Dimension".
:type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType
:param name: Required. The name of the column to group. This version supports subscription
lowest possible grain.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Union[str, "ReportConfigColumnType"],
name: str,
**kwargs
):
super(ReportConfigGrouping, self).__init__(**kwargs)
self.type = type
self.name = name
class ReportConfigSorting(msrest.serialization.Model):
"""The order by expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param direction: Direction of sort. Possible values include: "Ascending", "Descending".
:type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection
:param name: Required. The name of the column to sort.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'direction': {'key': 'direction', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
direction: Optional[Union[str, "ReportConfigSortingDirection"]] = None,
**kwargs
):
super(ReportConfigSorting, self).__init__(**kwargs)
self.direction = direction
self.name = name
class ReportConfigTimePeriod(msrest.serialization.Model):
"""The start and end date for pulling data for the report.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date to pull data from.
:type from_property: ~datetime.datetime
:param to: Required. The end date to pull data to.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(ReportConfigTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class View(ProxyResource):
"""States and configurations of Cost Analysis.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
:param display_name: User input name of the view. Required.
:type display_name: str
:param scope: Cost Management scope to save the view on. This includes
'subscriptions/{subscriptionId}' for subscription scope,
'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for
Department scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for BillingProfile scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}'
for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}'
for Management Group scope,
'/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for
ExternalBillingAccount scope, and
'/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for
ExternalSubscription scope.
:type scope: str
:ivar created_on: Date the user created this view.
:vartype created_on: ~datetime.datetime
:ivar modified_on: Date when the user last modified this view.
:vartype modified_on: ~datetime.datetime
:param chart: Chart type of the main view in Cost Analysis. Required. Possible values include:
"Area", "Line", "StackedColumn", "GroupedColumn", "Table".
:type chart: str or ~azure.mgmt.costmanagement.models.ChartType
:param accumulated: Show costs accumulated over time. Possible values include: "true", "false".
:type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType
:param metric: Metric to use when displaying costs. Possible values include: "ActualCost",
"AmortizedCost", "AHUB".
:type metric: str or ~azure.mgmt.costmanagement.models.MetricType
:param kpis: List of KPIs to show in Cost Analysis UI.
:type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties]
:param pivots: Configuration of 3 sub-views in the Cost Analysis UI.
:type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties]
:param type_properties_query_type: The type of the report. Usage represents actual usage,
forecast represents forecasted data and UsageAndForecast represents both usage and forecasted
data. Actual usage and forecasted data can be differentiated based on dates. Possible values
include: "Usage".
:type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType
:param timeframe: The time frame for pulling data for the report. If custom, then a specific
time period must be provided. Possible values include: "WeekToDate", "MonthToDate",
"YearToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
:param time_period: Has time period for pulling data for the report.
:type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
:param dataset: Has definition for data in this report config.
:type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'},
'chart': {'key': 'properties.chart', 'type': 'str'},
'accumulated': {'key': 'properties.accumulated', 'type': 'str'},
'metric': {'key': 'properties.metric', 'type': 'str'},
'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'},
'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'},
'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'},
'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'},
'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'},
'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'},
}
def __init__(
self,
*,
e_tag: Optional[str] = None,
display_name: Optional[str] = None,
scope: Optional[str] = None,
chart: Optional[Union[str, "ChartType"]] = None,
accumulated: Optional[Union[str, "AccumulatedType"]] = None,
metric: Optional[Union[str, "MetricType"]] = None,
kpis: Optional[List["KpiProperties"]] = None,
pivots: Optional[List["PivotProperties"]] = None,
type_properties_query_type: Optional[Union[str, "ReportType"]] = None,
timeframe: Optional[Union[str, "ReportTimeframeType"]] = None,
time_period: Optional["ReportConfigTimePeriod"] = None,
dataset: Optional["ReportConfigDataset"] = None,
**kwargs
):
super(View, self).__init__(e_tag=e_tag, **kwargs)
self.display_name = display_name
self.scope = scope
self.created_on = None
self.modified_on = None
self.chart = chart
self.accumulated = accumulated
self.metric = metric
self.kpis = kpis
self.pivots = pivots
self.type_properties_query_type = type_properties_query_type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
class ViewListResult(msrest.serialization.Model):
"""Result of listing views. It contains a list of available views.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of views.
:vartype value: list[~azure.mgmt.costmanagement.models.View]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[View]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ViewListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
| 38.16251 | 498 | 0.649005 | 90,996 | 0.991037 | 0 | 0 | 0 | 0 | 0 | 0 | 57,015 | 0.62095 |
b9921ebf7fdd9b5fb1dd763092a97ae1888e730f | 3,860 | py | Python | test/test_simple_compression.py | jayvdb/brotlipy | ffddf2ea5adc584c8c353d246bb1077b7e781b63 | [
"MIT"
] | null | null | null | test/test_simple_compression.py | jayvdb/brotlipy | ffddf2ea5adc584c8c353d246bb1077b7e781b63 | [
"MIT"
] | null | null | null | test/test_simple_compression.py | jayvdb/brotlipy | ffddf2ea5adc584c8c353d246bb1077b7e781b63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_simple_compression
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for compression of single chunks.
"""
import brotli
import pytest
from hypothesis import given
from hypothesis.strategies import binary, integers, sampled_from, one_of
def test_roundtrip_compression_with_files(simple_compressed_file):
"""
Roundtripping data through the compressor works correctly.
"""
with open(simple_compressed_file[0], 'rb') as f:
uncompressed_data = f.read()
assert brotli.decompress(
brotli.compress(uncompressed_data)
) == uncompressed_data
@given(
chunk_size=integers(min_value=1, max_value=2**12),
mode=sampled_from(list(brotli.BrotliEncoderMode)),
quality=integers(min_value=0, max_value=11),
lgwin=integers(min_value=10, max_value=24),
lgblock=one_of(
integers(min_value=0, max_value=0),
integers(min_value=16, max_value=24)
),
)
def test_streaming_compression(one_compressed_file,
chunk_size,
mode,
quality,
lgwin,
lgblock):
"""
Confirm that the streaming compressor works as expected.
"""
compressed_chunks = []
c = brotli.Compressor(
mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
)
with open(one_compressed_file, 'rb') as f:
while True:
next_data = f.read(chunk_size)
if not next_data:
break
compressed_chunks.append(c.compress(next_data))
compressed_chunks.append(c.finish())
decompressed = brotli.decompress(b''.join(compressed_chunks))
with open(one_compressed_file, 'rb') as f:
assert decompressed == f.read()
@given(
chunk_size=integers(min_value=1, max_value=2**12),
mode=sampled_from(list(brotli.BrotliEncoderMode)),
quality=integers(min_value=0, max_value=11),
lgwin=integers(min_value=10, max_value=24),
lgblock=one_of(
integers(min_value=0, max_value=0),
integers(min_value=16, max_value=24)
),
)
def test_streaming_compression_flush(one_compressed_file,
chunk_size,
mode,
quality,
lgwin,
lgblock):
"""
Confirm that the streaming compressor works as expected, including flushes
after each chunk.
"""
compressed_chunks = []
c = brotli.Compressor(
mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
)
with open(one_compressed_file, 'rb') as f:
while True:
next_data = f.read(chunk_size)
if not next_data:
break
compressed_chunks.append(c.compress(next_data))
compressed_chunks.append(c.flush())
compressed_chunks.append(c.finish())
decompressed = brotli.decompress(b''.join(compressed_chunks))
with open(one_compressed_file, 'rb') as f:
assert decompressed == f.read()
@given(binary())
def test_compressed_data_roundtrips(s):
assert brotli.decompress(brotli.compress(s)) == s
@given(binary(), binary())
def test_compressed_data_with_dictionaries(s, dictionary):
d = brotli.Decompressor(dictionary)
compressed = brotli.compress(s, dictionary=dictionary)
uncompressed = d.decompress(compressed)
assert uncompressed == s
@pytest.mark.parametrize(
"params",
[
{"mode": 52},
{"quality": 52},
{"lgwin": 52},
{"lgblock": 52},
]
)
@pytest.mark.parametrize("exception_cls", [brotli.Error, brotli.error])
def test_bad_compressor_parameters(params, exception_cls):
with pytest.raises(exception_cls):
brotli.Compressor(**params)
| 29.692308 | 78 | 0.615803 | 0 | 0 | 0 | 0 | 3,251 | 0.842228 | 0 | 0 | 459 | 0.118912 |
b992a4ec960bcf3e39ba5a1bb6a8cd2e68be293e | 1,987 | py | Python | wexapi/models/ticker.py | madmis/wexapi | f5b1b9b566f767bca7d8fad1f08c3d1bca42355a | [
"MIT"
] | 3 | 2018-06-08T12:45:04.000Z | 2018-08-02T11:09:11.000Z | wexapi/models/ticker.py | madmis/wexapi | f5b1b9b566f767bca7d8fad1f08c3d1bca42355a | [
"MIT"
] | null | null | null | wexapi/models/ticker.py | madmis/wexapi | f5b1b9b566f767bca7d8fad1f08c3d1bca42355a | [
"MIT"
] | null | null | null | from decimal import Decimal
class Ticker(object):
def __init__(
self,
high: float,
low: float,
avg: float,
vol: float,
vol_cur: int,
last: float,
buy: float,
sell: float,
updated: int,
):
self.high = high
self.low = low
self.avg = avg
self.vol = vol
self.vol_cur = vol_cur
self.last = last
self.buy = buy
self.sell = sell
self.updated = updated
@property
def high(self) -> Decimal:
return self._high
@high.setter
def high(self, value: float):
self._high = Decimal(value)
@property
def low(self) -> Decimal:
return self._low
@low.setter
def low(self, value: float):
self._low = Decimal(value)
@property
def avg(self) -> Decimal:
return self._avg
@avg.setter
def avg(self, value: float):
self._avg = Decimal(value)
@property
def vol(self) -> Decimal:
return self._vol
@vol.setter
def vol(self, value: float):
self._vol = Decimal(value)
@property
def vol_cur(self) -> Decimal:
return self._vol_cur
@vol_cur.setter
def vol_cur(self, value: float):
self._vol_cur = Decimal(value)
@property
def last(self) -> Decimal:
return self._last
@last.setter
def last(self, value: float):
self._last = Decimal(value)
@property
def buy(self) -> Decimal:
return self._buy
@buy.setter
def buy(self, value: float):
self._buy = Decimal(value)
@property
def sell(self) -> Decimal:
return self._sell
@sell.setter
def sell(self, value: float):
self._sell = Decimal(value)
@property
def updated(self) -> int:
return self._updated
@updated.setter
def updated(self, value: int):
self._updated = int(value)
| 20.27551 | 38 | 0.545546 | 1,956 | 0.984399 | 0 | 0 | 1,332 | 0.670357 | 0 | 0 | 0 | 0 |
b99506d26f9716e398b3a3724d393185a9900942 | 1,216 | py | Python | hard-gists/98bb452dc14e8c40e403/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/98bb452dc14e8c40e403/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/98bb452dc14e8c40e403/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | from scryptos import *
p1 = 32581479300404876772405716877547
p2 = 27038194053540661979045656526063
p3 = 26440615366395242196516853423447
n = p1*p2*p3
e = 3
c = int(open("flag.enc", "rb").read().encode("hex"), 16)
# from User's Guide to PARI/GP, nth_root function
sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error("Impossible case in sqrtn"));if(type(x)=="t_INTMOD"||type(x)=="t_PADIC",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}'
c1 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p1)]))
c2 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p2)]))
c3 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p3)]))
"""
c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629]
c2 = [19616973567618515464515107624812]
c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946]
"""
for x in c1:
for y in c2:
for z in c3:
crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)])
d = hex(crt, 2)[2:].decode("hex")
if "0ctf" in d:
print d[d.find("0ctf"):].strip()
| 39.225806 | 224 | 0.663651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.557566 |
b9954284c404c9a5aed225965d5006c8735af349 | 1,717 | py | Python | musa/migrations/0001_initial.py | ccsreenidhin/Music-Web-Django | 9b8286914f9099b9ed56c712c7ca384846f189d1 | [
"MIT"
] | null | null | null | musa/migrations/0001_initial.py | ccsreenidhin/Music-Web-Django | 9b8286914f9099b9ed56c712c7ca384846f189d1 | [
"MIT"
] | null | null | null | musa/migrations/0001_initial.py | ccsreenidhin/Music-Web-Django | 9b8286914f9099b9ed56c712c7ca384846f189d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-29 06:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import musa.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MusicCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=70, null=True)),
('document', models.FileField(upload_to=musa.models.get_upload_path)),
('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fullname', models.CharField(blank=True, max_length=70)),
('favourite_music', models.CharField(blank=True, max_length=70)),
('about', models.TextField(blank=True, max_length=300)),
('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 40.880952 | 121 | 0.633663 | 1,476 | 0.859639 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.142691 |
b9957182927ee0480e35dd837a4d9ee2d8587462 | 3,207 | py | Python | nuitka/codegen/LoopCodes.py | RESP3CT88/Nuitka | 0fcc25d9f00c4fc78c79a863c4b7987f573962e1 | [
"Apache-2.0"
] | 1 | 2021-05-25T12:48:28.000Z | 2021-05-25T12:48:28.000Z | venv/Lib/site-packages/nuitka/codegen/LoopCodes.py | matthijsvanvliet/raytracing-python | 73d692b47330ab94eedde579a51063e3a907e92b | [
"MIT"
] | null | null | null | venv/Lib/site-packages/nuitka/codegen/LoopCodes.py | matthijsvanvliet/raytracing-python | 73d692b47330ab94eedde579a51063e3a907e92b | [
"MIT"
] | null | null | null | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop codes.
Code generation for loops, breaking them, or continuing them. In Nuitka, there
are no for-loops or while-loops at this point. They have been re-formulated in
a simpler loop without a condition, and statements therein that break under
certain conditions.
See Developer Manual for how the CPython loops are mapped to these nodes.
"""
from .CodeHelpers import generateStatementSequenceCode
from .ErrorCodes import getErrorExitBoolCode
from .ExceptionCodes import getExceptionUnpublishedReleaseCode
from .LabelCodes import getGotoCode, getLabelCode
def generateLoopBreakCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
break_target = context.getLoopBreakTarget()
getGotoCode(break_target, emit)
def generateLoopContinueCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
continue_target = context.getLoopContinueTarget()
getGotoCode(continue_target, emit)
def generateLoopCode(statement, emit, context):
loop_start_label = context.allocateLabel("loop_start")
if not statement.isStatementAborting():
loop_end_label = context.allocateLabel("loop_end")
else:
loop_end_label = None
getLabelCode(loop_start_label, emit)
old_loop_break = context.setLoopBreakTarget(loop_end_label)
old_loop_continue = context.setLoopContinueTarget(loop_start_label)
generateStatementSequenceCode(
statement_sequence=statement.subnode_loop_body,
allow_none=True,
emit=emit,
context=context,
)
context.setLoopBreakTarget(old_loop_break)
context.setLoopContinueTarget(old_loop_continue)
# Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway.
old_source_ref = context.setCurrentSourceCodeReference(
statement.getSourceReference()
)
getErrorExitBoolCode(
condition="CONSIDER_THREADING() == false", emit=emit, context=context
)
context.setCurrentSourceCodeReference(old_source_ref)
getGotoCode(loop_start_label, emit)
if loop_end_label is not None:
getLabelCode(loop_end_label, emit)
| 34.858696 | 111 | 0.752728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,505 | 0.469286 |
b995831c9a98c5b05882c5bbcc4b241cd51503bd | 4,837 | py | Python | 3_module/C_BloomFilter.py | L4mborg1n1-D14610/Algoritms_and_DataStructure | f61b7434dbc600da02e8ec38648fa84beb160f17 | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | 3_module/C_BloomFilter.py | L4mborg1n1-D14610/Algoritms_and_DataStructure | f61b7434dbc600da02e8ec38648fa84beb160f17 | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | 3_module/C_BloomFilter.py | L4mborg1n1-D14610/Algoritms_and_DataStructure | f61b7434dbc600da02e8ec38648fa84beb160f17 | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | import math
from sys import exit
# So: n is the approximate number of elements and P is the desired false-positive
# probability; the structure size is then m = -(n * log2(P)) / ln(2) bits and the
# number of hash functions is k = -log2(P).
# The hash functions have the form (((i + 1)*x + p(i+1)) mod M) mod m, where x is the
# key, i is the index of the hash function, p(i+1) is the (i+1)-th prime, and M is the
# 31st Mersenne number, M = 2^31 - 1 = 2 147 483 647, which is prime.
# Computing the hashes requires the first k primes, so they are computed once in the
# BloomFilter constructor and kept in the data structure.
# We also need a bit array of size m; Python has no built-in bit array, so a bytearray
# is used instead, wrapped in a small helper class that can set a given bit to 1, check
# whether a given bit is 1, and print (return) the array itself.
Mersen_31 = 2147483647
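# Worked example of the sizing formulas above (illustrative): for n = 1000 expected
# keys and P = 0.01, m = 9585 bits (= round(-1000 * log2(0.01) / ln 2)) and
# k = 7 hash functions (= round(-log2(0.01))), which is exactly what
# BloomFilter.__init__ below computes.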
class BitArray:
def __init__(self, size):
self.__array = bytearray(int(math.ceil(size / 8)))
self.__size = size
def add_bit(self, i):
        # bit i lives in byte i // 8, at position i % 8 within that byte
self.__array[i // 8] |= 2 ** (7 - (i % 8))
def check_bit(self, i):
if (self.__array[i // 8] & (2 ** (7 - (i % 8)))) == 0:
return False
else:
return True
def print(self):
array_str = ""
for byte in self.__array:
_line = str(bin(byte))[2:]
if len(_line) != 8:
_line = '0' * (8 - len(_line)) + _line
array_str += _line
return array_str[:self.__size]
class BloomFilter:
def __init__(self, n: int, p: float):
self.size = int(-round(n * math.log2(p) / math.log(2)))
self.hash_numbers = int(-round(math.log2(p)))
self.__prime_numbers = list()
self.__get_prime(self.hash_numbers + 1)
self.__bitarray = BitArray(self.size)
def __get_prime(self, prime_size):
        # Trial-testing every integer for primality is too slow, so simplify a bit:
        # walk odd candidates only (step 2, starting from 3) and test each one for
        # divisibility by the primes already found (skipping 2, since candidates are odd).
if prime_size == 1:
self.__prime_numbers.append(2)
return
self.__prime_numbers.append(2)
i = 3
while len(self.__prime_numbers) < prime_size:
j = 1
prime_flag = True
while j < len(self.__prime_numbers):
if (i % self.__prime_numbers[j]) == 0:
prime_flag = False
break
j += 1
if prime_flag:
self.__prime_numbers.append(i)
i += 2
def __get_hash(self, x, i):
return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size
def add(self, key: int):
i = 0
while i < self.hash_numbers:
self.__bitarray.add_bit(self.__get_hash(key, i))
i += 1
def search(self, key: int):
i = 0
while i < self.hash_numbers:
if not self.__bitarray.check_bit(self.__get_hash(key, i)):
return False
i += 1
return True
def print(self):
return self.__bitarray.print()
bloom_filter = 0
while True:
try:
line = input().split()
if len(line) == 0:
continue
else:
if line[0] == "set":
try:
elements_number = int(line[1])
probability = float(line[2])
if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
print("error")
continue
bloom_filter = BloomFilter(elements_number, probability)
if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
print("error")
continue
break
except TypeError:
print("error")
continue
else:
print("error")
continue
except EOFError:
exit()
print(bloom_filter.size, bloom_filter.hash_numbers)
while True:
try:
line = input().split()
if len(line) == 0:
continue
elif line[0] == "print":
print(bloom_filter.print())
elif (line[0] == "add") & (line[1].isnumeric()):
bloom_filter.add(int(line[1]))
elif (line[0] == "search") & (line[1].isnumeric()):
print(int(bloom_filter.search(int(line[1]))))
else:
print("error")
except EOFError:
break
| 34.798561 | 116 | 0.551995 | 2,671 | 0.46975 | 0 | 0 | 0 | 0 | 0 | 0 | 2,126 | 0.373901 |
b996ad8d5f407e5b1769d9b50ca7be5705a211e8 | 1,937 | py | Python | pyzmq/examples/pubsub/subscriber.py | Surfndez/source-publish | c3838b303c1a0806f21cd4e8d8c207015b3ce9c8 | [
"Intel"
] | null | null | null | pyzmq/examples/pubsub/subscriber.py | Surfndez/source-publish | c3838b303c1a0806f21cd4e8d8c207015b3ce9c8 | [
"Intel"
] | 1 | 2021-01-21T17:43:33.000Z | 2021-01-21T17:43:33.000Z | pyzmq/examples/pubsub/subscriber.py | Surfndez/source-publish | c3838b303c1a0806f21cd4e8d8c207015b3ce9c8 | [
"Intel"
] | null | null | null | """A test that subscribes to NumPy arrays.
Uses REQ/REP (on PUB/SUB socket + 1) to synchronize
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2010 Brian Granger
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import time
import zmq
import numpy
def sync(connect_to):
# use connect socket + 1
sync_with = ':'.join(connect_to.split(':')[:-1] +
[str(int(connect_to.split(':')[-1]) + 1)]
)
ctx = zmq.Context.instance()
s = ctx.socket(zmq.REQ)
s.connect(sync_with)
s.send('READY')
s.recv()
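# For example, with connect_to = 'tcp://127.0.0.1:5555' the handshake above talks
# REQ/REP on 'tcp://127.0.0.1:5556', i.e. the PUB/SUB port + 1 described in the
# module docstring.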
def main():
if len (sys.argv) != 3:
print 'usage: subscriber <connect_to> <array-count>'
sys.exit (1)
try:
connect_to = sys.argv[1]
array_count = int (sys.argv[2])
except (ValueError, OverflowError), e:
print 'array-count must be integers'
sys.exit (1)
ctx = zmq.Context()
s = ctx.socket(zmq.SUB)
s.connect(connect_to)
s.setsockopt(zmq.SUBSCRIBE,'')
sync(connect_to)
start = time.clock()
print "Receiving arrays..."
for i in range(array_count):
a = s.recv_pyobj()
print " Done."
end = time.clock()
elapsed = (end - start) * 1000000
if elapsed == 0:
elapsed = 1
throughput = (1000000.0 * float (array_count)) / float (elapsed)
message_size = a.nbytes
megabits = float (throughput * message_size * 8) / 1000000
print "message size: %.0f [B]" % (message_size, )
print "array count: %.0f" % (array_count, )
print "mean throughput: %.0f [msg/s]" % (throughput, )
print "mean throughput: %.3f [Mb/s]" % (megabits, )
time.sleep(1.0)
if __name__ == "__main__":
main()
| 25.826667 | 78 | 0.545173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 694 | 0.358286 |
b997c70668ace413cc27502883f737e007e56239 | 1,006 | py | Python | Doc/includes/sqlite3/load_extension.py | livioso/cpython | 077061a7b24917aaf31057885c69919c5a553c88 | [
"PSF-2.0"
] | 36 | 2019-06-07T20:44:06.000Z | 2022-03-23T06:19:43.000Z | Doc/includes/sqlite3/load_extension.py | livioso/cpython | 077061a7b24917aaf31057885c69919c5a553c88 | [
"PSF-2.0"
] | 49 | 2016-02-29T17:59:52.000Z | 2019-05-05T04:59:26.000Z | Doc/includes/sqlite3/load_extension.py | livioso/cpython | 077061a7b24917aaf31057885c69919c5a553c88 | [
"PSF-2.0"
] | 28 | 2019-06-27T04:11:27.000Z | 2022-03-11T06:27:44.000Z | import sqlite3
con = sqlite3.connect(":memory:")
# enable extension loading
con.enable_load_extension(True)
# Load the fulltext search extension
con.execute("select load_extension('./fts3.so')")
# alternatively you can load the extension using an API call:
# con.load_extension("./fts3.so")
# disable extension loading again
con.enable_load_extension(False)
# example from SQLite wiki
con.execute("create virtual table recipe using fts3(name, ingredients)")
con.executescript("""
insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');
insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');
insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');
insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');
""")
for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"):
print(row)
| 37.259259 | 104 | 0.744533 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 801 | 0.796223 |
b9982b7f935a0931c3a9dc4e8ec48b12b5523acb | 22,060 | py | Python | lingvo/core/inference_graph_exporter.py | RunzheYang/lingvo | 1291e29812f9ee9836f9cacbb05db9ec6b095234 | [
"Apache-2.0"
] | 1 | 2021-09-02T18:04:13.000Z | 2021-09-02T18:04:13.000Z | lingvo/core/inference_graph_exporter.py | RunzheYang/lingvo | 1291e29812f9ee9836f9cacbb05db9ec6b095234 | [
"Apache-2.0"
] | null | null | null | lingvo/core/inference_graph_exporter.py | RunzheYang/lingvo | 1291e29812f9ee9836f9cacbb05db9ec6b095234 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for exporting an InferenceGraph proto from model params."""
import collections
import contextlib
import re
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import bfloat16_variables
from lingvo.core import inference_graph_pb2
from lingvo.core import py_utils
import six
from google.protobuf import text_format
FLAGS = tf.flags.FLAGS
# InferenceDeviceOptions contains options to configure inference on the device.
# device: Device to infer on.
# retain_device_placement: If true, the specified device in the generated
# inference graph nodes will be retained. Otherwise, the specified device
# will be cleared, so that the runtime can choose automatically.
# var_options: Options on handling variables. For TPUs, variables can be
# either placed on device through 'ON_DEVICE' option, or treated as
# constants with AS_CONSTANTS.
# gen_init_op: Whether to serialize initialization ops for the device. For TPUs,
# servers can be initialized globally once, in which case this should be
# turned off to avoid tripping initialization checks.
# dtype_override: Whether to override the dtype to use for activations and
# weights in the model. Options supported are None or tf.bfloat16.
InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [
'device', 'retain_device_placement', 'var_options', 'gen_init_op',
'dtype_override', 'fprop_dtype_override'
])
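# Example (illustrative only): options for TPU serving with variables baked into
# the graph as constants and activations forced to bfloat16.
#
#   tpu_options = InferenceDeviceOptions(
#       device='tpu',
#       retain_device_placement=False,
#       var_options='AS_CONSTANTS',
#       gen_init_op=False,
#       dtype_override=None,
#       fprop_dtype_override=tf.bfloat16)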
_CONST_GUARANTEE = None
@contextlib.contextmanager
def NoConstGuaranteeScope():
"""Disallow const gauranteeing variable with-in scope."""
global _CONST_GUARANTEE
var_scope = tf.get_variable_scope()
old_caching_device = var_scope.caching_device
old_val = _CONST_GUARANTEE
var_scope.set_caching_device(None)
_CONST_GUARANTEE = False
yield
_CONST_GUARANTEE = old_val
var_scope.set_caching_device(old_caching_device)
# Marks variables as constants for compilation.
def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):
global _CONST_GUARANTEE
if _CONST_GUARANTEE:
with tf.control_dependencies(None):
return tf.guarantee_const(
getter(name, *args, **kwargs), name=name + '/GuaranteeConst')
else:
return getter(name, *args, **kwargs)
@contextlib.contextmanager
def ConstGuaranteeScope():
"""Treats all variables under this scope as constants."""
global _CONST_GUARANTEE
var_scope = tf.get_variable_scope()
old_custom_getter = var_scope.custom_getter
old_caching_device = var_scope.caching_device
old_val = _CONST_GUARANTEE
var_scope.set_custom_getter(MaybeGuaranteeConstGetter)
var_scope.set_caching_device(lambda op: op.device)
_CONST_GUARANTEE = True
yield
_CONST_GUARANTEE = old_val
var_scope.set_custom_getter(old_custom_getter)
var_scope.set_caching_device(old_caching_device)
@contextlib.contextmanager
def _DummyScope():
yield None
def _GetVarName(v):
return v.name[:-len(':0')]
def _MakeVariableDictionary(variables):
"""Returns a dictionary with name -> tf.Variable() mapping."""
vars_dict = {}
for v in variables:
vars_dict[_GetVarName(v)] = v
return vars_dict
def IsTpu(device_options):
return device_options.device == 'tpu'
def ShouldForceBfloat16ForWeightsAndActivations(device_options):
return device_options.dtype_override == tf.bfloat16
def ShouldForceBfloat16ForActivations(device_options):
return device_options.fprop_dtype_override == tf.bfloat16
def ConvertSubgraphDictToProto(subgraphs_dict):
"""Converts dict of subgraphs/feeds/fetches to InferenceGraph.
Args:
subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a
NestedMap.
Returns:
Equivalent InferenceGraph.
"""
# Build the output inference graph.
inference_graph_proto = inference_graph_pb2.InferenceGraph()
for subgraph_name, tensors in subgraphs_dict.items():
fetches = tensors[0]
feeds = tensors[1]
# Rewrite fetches and feeds to map to their tensor name instead of
# Tensor instance.
named_fetches = {k: v.name for k, v in fetches.items() if v is not None}
named_feeds = {k: v.name for k, v in feeds.items() if v is not None}
# Export as subgraph.
inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches)
inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds)
return inference_graph_proto
def GetOutputOpNames(graph,
inference_graph_proto,
subgraphs=None,
preserve_colocation_nodes=True,
preserve_saver_restore_nodes=False,
preserve_extra_ops=None):
"""Gets output op names from an inference graph.
Args:
graph: The tf graph.
inference_graph_proto: an InferenceGraph proto.
subgraphs: an optional list of subgraph names. If provided, only output ops
from these subgraphs are preserved. Otherwise, all subgraphs are included.
preserve_colocation_nodes: a Python bool, default to True. Preserves nodes
colocating with the closure of output ops in the returned array.
preserve_saver_restore_nodes: a Python bool, default to False. Preserves
nodes for restoring according to inference_graph_proto.saver_def.
preserve_extra_ops: an optional list of extra op names to preserve as long
as they present in the graph.
Returns:
Array of tf op names that should be preserved in the graph.
"""
output_op_names = set()
def _GetOpName(tensor_or_op_name):
"""Returns the op name of the given node name."""
# Tensor names have format <op_name>:<output_index>. Some inference
# graphs put tensors and others put ops in the feeds/fetches (depends
# on how it is used). We differentiate here. We still do the lookup in
# the graph to sanity check (versus relying on the text manipulation).
# If this logic ever breaks, TensorFlow will raise a ValueError with
# a description of the syntax of each.
if re.search(r':[0-9]+$', tensor_or_op_name):
# Tensor-name.
t = graph.get_tensor_by_name(tensor_or_op_name)
return t.op.name
else:
op = graph.get_operation_by_name(tensor_or_op_name)
return op.name
for subgraph_name, subgraph in inference_graph_proto.subgraphs.items():
if subgraphs and subgraph_name not in subgraphs:
tf.logging.info('Skip subgraph %s.', subgraph_name)
continue
# Sometimes feeds aren't connected to any outputs but keep them in the graph
# anyways to avoid errors.
for tensor_or_op_name in (list(subgraph.feeds.values()) +
list(subgraph.fetches.values())):
output_op_names.add(_GetOpName(tensor_or_op_name))
if preserve_saver_restore_nodes:
# Only nodes for restoring is preserved. saver_def.save_tensor_name is
# skipped because it's only used for saving.
saver_def = inference_graph_proto.saver_def
for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]:
try:
output_op_names.add(_GetOpName(op_name))
except KeyError:
tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name)
if not preserve_colocation_nodes and not preserve_extra_ops:
return sorted(list(output_op_names))
# We also need to preserve any nodes that are used for colocation.
# E.g., a node may have this attr:
# attr {
# key: "_class"
# value {
# list {
# s: "loc:@inference/embedding_lookup/Read/ReadVariableOp"
# }
# }
# }
#
# In this case, we need to make sure the node
# inference/embedding_lookup/Read/ReadVariableOp is not pruned.
#
# TODO(zhifengc): It's possible that it's better to fix in
# tf.graph_util.extract_sub_graph.
graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),
list(output_op_names))
reachable_vars = [node.name for node in graph_def.node]
for node in graph.get_operations():
if preserve_extra_ops and node.name in preserve_extra_ops:
output_op_names.add(node.name)
elif preserve_colocation_nodes and '_class' in node.node_def.attr:
for loc in node.node_def.attr['_class'].list.s:
loc = six.ensure_text(loc, 'utf-8')
if loc.startswith('loc:@'):
loc_name = loc[5:]
if loc_name not in reachable_vars:
# Skip nodes that cannot be reached from the pruned graph.
continue
output_op_names.add(node.name)
return sorted(list(output_op_names))
def _ParamExists(param_obj, param_name):
"""Tests whether param_name is contained in param_obj."""
if not param_obj:
return
for k, _ in param_obj.IterParams():
if k == param_name:
return True
return False
def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names):
"""Freezes a graph from a checkpoint.
Args:
graph: tf.Graph.
saver: The tf.Saver to use for restoration.
checkpoint: The checkpoint to restore.
output_op_names: Names of output ops.
Returns:
Resulting tf.GraphDef.
"""
sess = tf.Session(graph=graph, config=py_utils.SessionConfig())
saver.restore(sess, checkpoint)
return tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), output_op_names)
def _FreezeDefaults(graph, output_op_names):
"""Default initializes a graph and freezes it.
Args:
graph: tf.Graph.
output_op_names: Names of output ops.
Returns:
Resulting tf.GraphDef.
"""
with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:
sess.run(graph.get_operation_by_name('init_all_variables'))
return tf.graph_util.convert_variables_to_constants(sess,
graph.as_graph_def(),
output_op_names)
class InferenceGraphExporter:
"""Class for exporting inference graphs."""
@classmethod
def Export(cls,
model_cfg,
model_task_name=None,
device_options=InferenceDeviceOptions(
device='',
retain_device_placement=False,
var_options=None,
gen_init_op=True,
dtype_override=None,
fprop_dtype_override=None),
freeze_checkpoint=None,
freeze_defaults=False,
export_path=None,
subgraph_filter=None,
random_seed=None,
disable_packed_input=True):
"""Exports a InferenceGraph proto with piecewise subgraphs.
Sets FLAGS.enable_asserts to False unless user explicitly sets it to True.
Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing
and multi-core inference on TPUs work properly.
Args:
model_cfg: a Params instance as returned by
model_registry.GetParams(modelname, 'Test') or model_params.Model().
model_task_name: The task to generate an inference graph for. Should be
None for single-task models.
device_options: Device options for the accelerator used for serving.
freeze_checkpoint: The checkpoint to load. Loads and freezes the model if
given.
freeze_defaults: Default-initializes the graph and freezes it. Useful for
early testing of downstream tools without having a checkpoint.
export_path: If not None, write the inference graph in ASCII to this path.
subgraph_filter: A string or a list of subgraph names. If not None or
empty, export only this list of inference subgraphs.
random_seed: Fixes the random seed in the exported inference graph.
disable_packed_input: Disable packed input for inference writing purposes.
Returns:
InferenceGraph proto.
Raises:
ValueError: if the model does not support the listed subgraphs.
"""
assert issubclass(model_cfg.cls, base_model.BaseModel)
if device_options.dtype_override and device_options.fprop_dtype_override:
raise ValueError(
'device_options.{dtype_override, fprop_dtype_override} cannot both be '
'set.')
if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)):
subgraph_filter = [subgraph_filter]
# Disable assertions unless user explicitly enables it.
if FLAGS['enable_asserts'].using_default_value:
FLAGS.enable_asserts = False
# TODO(laurenzo): Work out how much we need to specify here in terms of
# cluster configuration.
cls._SetClusterParams(model_cfg.cluster, device_options)
# Configure the model.
model_cfg.random_seed = random_seed
model_cfg.is_inference = True
if disable_packed_input:
def _DisablePackedInput(task):
if (_ParamExists(task, 'encoder') and
_ParamExists(task.encoder, 'packed_input')):
task.encoder.packed_input = False
if (_ParamExists(task, 'decoder') and
_ParamExists(task.decoder, 'packed_input')):
task.decoder.packed_input = False
if issubclass(model_cfg.cls, base_model.MultiTaskModel):
for _, task_param in model_cfg.task_params.IterParams():
_DisablePackedInput(task_param)
else:
_DisablePackedInput(model_cfg.task)
tf.logging.debug('Model %s params:', model_cfg.name)
for line in model_cfg.ToText().split('\n'):
tf.logging.debug('%s', line)
# Instantiate the graph.
graph = tf.Graph()
with graph.as_default():
tf.random.set_seed(random_seed)
cluster = model_cfg.cluster.Instantiate()
device = cluster.GetPlacer()
tpu_const_scope = _DummyScope()
if (IsTpu(device_options) and
device_options.var_options == 'AS_CONSTANTS'):
# Do not specify devices for variables if we are marking them as
# constants.
device = ''
tpu_const_scope = ConstGuaranteeScope()
with cluster, tf.device(device), tpu_const_scope:
bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations(
device_options)
if bfloat16_override:
py_utils.UpdateDtype(model_cfg, tf.bfloat16)
py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)
act_bfloat16_override = ShouldForceBfloat16ForActivations(
device_options)
if act_bfloat16_override:
py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)
# Hard-code TPU-related flags prior to instantiating model.
old_enable_asserts = FLAGS.enable_asserts
old_xla_device = FLAGS.xla_device
if IsTpu(device_options):
FLAGS.enable_asserts = False
FLAGS.xla_device = 'tpu'
try:
mdl = model_cfg.Instantiate()
task = mdl.GetTask(model_task_name)
variables_to_restore = (
_MakeVariableDictionary(tf.global_variables()) if not mdl.ema else
mdl.ema.variables_to_restore(mdl.variables_for_ema))
if bfloat16_override:
saver_var_spec = (
bfloat16_variables
.get_saver_spec_for_variables_with_bf16_overrides(
variables_to_restore))
else:
saver_var_spec = variables_to_restore
saver = tf.train.Saver(saver_var_spec)
tf.variables_initializer(
tf.global_variables(), name='init_all_variables')
if IsTpu(device_options) and device_options.gen_init_op:
tf.group(tf.tpu.initialize_system(), name='tpu_init_op')
if freeze_checkpoint or freeze_defaults:
# Replace variables with tensors using tf.identity in theta before
# freezing to avoid the graph referencing types of DT_RESOURCE.
def AddIdentityToTheta(layer):
layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access
layer.children.Transform(AddIdentityToTheta)
AddIdentityToTheta(task)
inference_graph_proto = inference_graph_pb2.InferenceGraph()
subgraphs_proto = task.Inference()
if isinstance(subgraphs_proto, dict):
subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto)
for name, subgraph in subgraphs_proto.subgraphs.items():
if not subgraph_filter or name in subgraph_filter:
inference_graph_proto.subgraphs[name].CopyFrom(subgraph)
# Yes, graph collections are bad; however, this seems to be the
# easiest way to get these assets registered from
# TextFileInitializer.
assets_collection = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.ASSET_FILEPATHS)
for asset in assets_collection:
if asset.op.type == 'Const' and asset.op.get_attr(
'dtype') == tf.dtypes.string:
constant_value = asset.op.get_attr('value')
if constant_value.string_val:
tf.logging.info('Found asset file_path: %s',
constant_value.string_val[0])
asset_file_def = inference_graph_proto.asset_file_def.add()
asset_file_def.tensor_info.name = asset.name
asset_file_def.filename = constant_value.string_val[0]
# Add a table init op and global variable init op to the graph.
# Tables can be declared anywhere in the graph, so this op has to be
# added last.
tf.tables_initializer(name='init_all_tables')
finally:
# Reset TPU-related flags after model instantiation.
FLAGS.enable_asserts = old_enable_asserts
FLAGS.xla_device = old_xla_device
tf.logging.info('Graph contains ops: %r',
[op.name for op in graph.get_operations()])
# Collection defs
if not tf.executing_eagerly():
meta_graph = tf.train.export_meta_graph(graph=graph)
for key in meta_graph.collection_def:
tf.logging.info('copying collection %s', key)
inference_graph_proto.collection_def[key].CopyFrom(
meta_graph.collection_def[key])
else:
tf.logging.warning('Not exporting collection defs '
'since operating in eager mode.')
# Freezing.
if freeze_defaults or freeze_checkpoint:
output_op_names = GetOutputOpNames(
graph,
inference_graph_proto,
preserve_colocation_nodes=False,
preserve_saver_restore_nodes=False)
if cls._DeviceSupportsFreezing(device_options):
raise ValueError('freeze_checkpoint cannot be used with device ' +
device_options.device)
if freeze_checkpoint:
tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint)
graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint,
output_op_names)
elif freeze_defaults:
tf.logging.info('Default initializing graph and freezing.')
graph_def = _FreezeDefaults(graph, output_op_names)
else:
inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def())
output_op_names = GetOutputOpNames(graph, inference_graph_proto)
# Prune the graph to just the parts we need.
# To support restoring, we have to not prune out the restore node.
output_op_names.append('init_all_tables')
output_op_names.append('init_all_variables')
output_op_names.append('save/control_dependency')
output_op_names.append('save/restore_all')
if IsTpu(device_options) and device_options.gen_init_op:
output_op_names.append('tpu_init_op')
graph_def = graph.as_graph_def()
tf.logging.info('Pruning graph to output ops: %r', output_op_names)
graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names)
if not device_options.retain_device_placement:
# Clear the device so that the runtime can choose.
tf.logging.info('Clearing device placement for: %s',
device_options.device)
for node in graph_def.node:
node.ClearField('device')
for function in graph_def.library.function:
for node_def in function.node_def:
node_def.ClearField('device')
inference_graph_proto.graph_def.CopyFrom(graph_def)
if export_path:
with tf.io.gfile.GFile(export_path, 'w') as f:
f.write(text_format.MessageToString(inference_graph_proto))
return inference_graph_proto
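# Example (sketch; the registered model name and output path below are
# placeholders, not values taken from this file):
#
#   model_cfg = model_registry.GetParams('image.mnist.LeNet5', 'Test')
#   inference_graph = InferenceGraphExporter.Export(
#       model_cfg,
#       freeze_defaults=True,
#       export_path='/tmp/inference_graph.pbtxt')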
@classmethod
def _SetClusterParams(cls, cluster_params, device_options):
"""Sets cluster params.
Args:
cluster_params: Model().cluster config.
device_options: InferenceDeviceOptions.
"""
def Update(p):
"""Update cluster params `p`."""
p.name = '/job:localhost'
p.replicas = 1
p.tpus_per_replica = 1 if IsTpu(device_options) else 0
p.gpus_per_replica = 0
p.devices_per_split = 1
cluster_params.mode = 'sync'
cluster_params.job = 'decoder'
cluster_params.add_summary = False
cluster_params.do_eval = True
Update(cluster_params.controller)
Update(cluster_params.worker)
Update(cluster_params.ps)
Update(cluster_params.evaler)
Update(cluster_params.decoder)
Update(cluster_params.input)
@classmethod
def _DeviceSupportsFreezing(cls, device_options):
return IsTpu(device_options)
| 38.100173 | 116 | 0.694334 | 11,523 | 0.522348 | 958 | 0.043427 | 12,475 | 0.565503 | 0 | 0 | 8,165 | 0.370127 |
b9982e3e4e7a4b4799e5780bd7629d5235cc1b40 | 1,836 | py | Python | src/preprocessing/annual_hc_by_crime_loc.py | VijayKalmath/USCrimeAnalysis | 14c96aae52547a4f7ea140395c62a621a97def50 | [
"MIT"
] | null | null | null | src/preprocessing/annual_hc_by_crime_loc.py | VijayKalmath/USCrimeAnalysis | 14c96aae52547a4f7ea140395c62a621a97def50 | [
"MIT"
] | null | null | null | src/preprocessing/annual_hc_by_crime_loc.py | VijayKalmath/USCrimeAnalysis | 14c96aae52547a4f7ea140395c62a621a97def50 | [
"MIT"
] | null | null | null | #! usr/env/bin python
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
def main():
# Fetch File Paths
file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls')
# Sort them according to year
file_paths.sort(key = lambda x: int(x[-8:-4]))
# Create a result dataframe to store the data
df_res = get_place_crime_count(file_paths[0])
# Iterate over the rest of the files
for p in tqdm(file_paths[1:]):
df_temp = get_place_crime_count(p)
df_res = pd.merge(df_res, df_temp, on = "Place", how = "left")
# Save the result to disk
df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False)
def get_place_crime_count(path:str)->pd.DataFrame:
"""
Return a DataFrame of incident counts by place for the year encoded in the given file path.
"""
# Extract the table name and year from the given file path
t_name = " ".join(path[path.index("Table"):path.index("_Incidents")].split("_"))
t_year = path[path.index(".xls")-4:path.index(".xls")]
try:
# Read the Excel spreadsheet
df = pd.read_excel(path,sheet_name=t_name)
# Get the start and end indices of the interested datapoints
start = df.index[df[t_name] == "Total"][0] + 1
end = df.index[df[t_name] == "Multiple locations"][0]
# Slice the dataset
df = df.iloc[start:end,0:2]
# Reset the index for the reduced dataframe
df.reset_index(drop = True, inplace = True)
# Rename the columns
df.rename(columns={t_name: "Place", "Unnamed: 1": t_year}, inplace = True)
# Return the value
return df
except:
# If there is no such data return an empty dataframe
i_list = list(range(0,47))
return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year])
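# Example (sketch; the file name below is a placeholder for a real UCR export
# whose name contains "Table_<n>_Incidents_..._<year>.xls" and whose workbook
# has a matching sheet):
#
#   df = get_place_crime_count('./data/raw/ucr/hc_count_by_place/Table_10_Incidents_2019.xls')
#   print(df.head())  # two columns: "Place" and "2019"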
if __name__ == '__main__':
main()
| 33.381818 | 84 | 0.6378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 720 | 0.392157 |
b998534e368ce74be309448b790e384f839c6d4a | 1,672 | py | Python | allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | ethanjperez/allennlp | e520993f16f0da7e2c40f6e44b8dc56338f46b57 | [
"Apache-2.0"
] | 24 | 2019-09-16T00:10:54.000Z | 2021-09-08T19:31:51.000Z | allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | ethanjperez/allennlp | e520993f16f0da7e2c40f6e44b8dc56338f46b57 | [
"Apache-2.0"
] | null | null | null | allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | ethanjperez/allennlp | e520993f16f0da7e2c40f6e44b8dc56338f46b57 | [
"Apache-2.0"
] | 7 | 2019-09-16T02:37:31.000Z | 2021-09-01T06:06:17.000Z | # pylint: disable=no-self-use,invalid-name
import numpy as np
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBagOfWordCountsTokenEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
def test_forward_calculates_bow_properly(self):
params = Params({})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_projects_properly(self):
params = Params({"projection_dim": 50})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
| 45.189189 | 93 | 0.70634 | 1,349 | 0.806818 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.047249 |
b998e92d411833a80bc4657adf0243c90d5c6084 | 5,457 | py | Python | demo/demo_shapenet.py | hengkaiz/meshrcnn | eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8 | [
"BSD-3-Clause"
] | null | null | null | demo/demo_shapenet.py | hengkaiz/meshrcnn | eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8 | [
"BSD-3-Clause"
] | null | null | null | demo/demo_shapenet.py | hengkaiz/meshrcnn | eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import logging
import multiprocessing as mp
import logging
import os
from detectron2.evaluation import inference_context
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from fvcore.common.file_io import PathManager
from pathlib import Path
from pytorch3d.io import save_obj
from shapenet.config.config import get_shapenet_cfg
from shapenet.data.utils import imagenet_preprocess
from shapenet.modeling.heads import voxel_head
from shapenet.modeling.mesh_arch import build_model
from shapenet.utils.checkpoint import clean_state_dict
import torchvision.transforms as T
import glob
from PIL import Image
import trimesh
import pyvista as pv
import pyacvd
import numpy as np
logger = logging.getLogger('demo')
def setup_cfgs(args):
cfg = get_shapenet_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/shapenet/voxmesh_R50.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input main folder")
# parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
def resample_mesh(mesh, count=2466):
pv_mesh = pv.wrap(mesh)
# logger.info('Original mesh:')
# print(pv_mesh)
clus = pyacvd.Clustering(pv_mesh)
clus.subdivide(3)
clus.cluster(count)
# remesh
remesh = clus.create_mesh()
# verts = remesh.points
# faces = remesh.faces.reshape((-1, 4))[:, 1:]
return remesh
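# Example (hedged sketch, not part of the original demo): resampling a mesh
# loaded with trimesh to roughly 2466 evenly spaced vertices; 'bunny.obj' is a
# placeholder path.
#
#   tri = trimesh.load('bunny.obj')
#   remeshed = resample_mesh(tri, count=2466)
#   verts = remeshed.points
#   faces = remeshed.faces.reshape((-1, 4))[:, 1:]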
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
device = torch.device("cuda:%d" % 0)
logger = setup_logger(name="demo shapenet")
logger.info("Arguments: " + str(args))
cfg = setup_cfgs(args)
# load checkpoing and build model
if cfg.MODEL.CHECKPOINT == "":
raise ValueError("Invalid checkpoing provided")
logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
state_dict = clean_state_dict(cp["best_states"]["model"])
model = build_model(cfg)
model.load_state_dict(state_dict)
logger.info("Model loaded")
model.to(device)
sub_dir = sorted(os.listdir(args.input))
for sd in sub_dir:
curr_path = os.path.join(args.input, sd)
images = glob.glob(curr_path + "/*.png")
for img_dir in images:
# load image
transform = [T.ToTensor()]
transform.append(imagenet_preprocess())
transform = T.Compose(transform)
im_name = img_dir.split("/")[-1].split(".")[0]
with PathManager.open(img_dir, "rb") as f:
img = Image.open(f).convert("RGB")
img = transform(img)
img = img[None, :, :, :]
img = img.to(device)
with inference_context(model):
img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)
# Save voxel_score
voxel_odir = os.path.join(curr_path, "voxel_score")
if not Path(voxel_odir).is_dir():
os.mkdir(voxel_odir)
voxel_file = os.path.join(voxel_odir, "%s.pt" % (im_name))
torch.save(voxel_scores, voxel_file)
# Save image features
imgfeat_odir = os.path.join(curr_path, "img_feat")
if not Path(imgfeat_odir).is_dir():
os.mkdir(imgfeat_odir)
img_feat_file = os.path.join(imgfeat_odir, "%s.pt" % (im_name))
torch.save(img_feats, img_feat_file)
# Save P
p_odir = os.path.join(curr_path, "P")
if not Path(p_odir).is_dir():
os.mkdir(p_odir)
p_file = os.path.join(p_odir, "%s.pt" % (im_name))
torch.save(P, p_file)
# Save cubified mesh
cmesh_odir = os.path.join(curr_path, "cube_mesh")
if not Path(cmesh_odir).is_dir():
os.mkdir(cmesh_odir)
cube_mesh_file = os.path.join(cmesh_odir, "%s_cube.obj" % (im_name))
c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)
save_obj(cube_mesh_file, c_verts, c_faces)
# Save predicted mesh
mesh_odir = os.path.join(curr_path, "final_mesh")
if not Path(mesh_odir).is_dir():
os.mkdir(mesh_odir)
save_file = os.path.join(mesh_odir, "%s.obj" % (im_name))
verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
logger.info("Predictions saved for %s/%s" % (curr_path.split('/')[-1], im_name))
| 31.912281 | 99 | 0.637713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.175188 |
b998f6994cf6e83702b501cd661bb37f91b59317 | 7,854 | py | Python | proglearn/voters.py | jshin13/progressive-learning | dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc | [
"Apache-2.0"
] | null | null | null | proglearn/voters.py | jshin13/progressive-learning | dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc | [
"Apache-2.0"
] | null | null | null | proglearn/voters.py | jshin13/progressive-learning | dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc | [
"Apache-2.0"
] | null | null | null | import numpy as np
# from sklearn.ensemble import BaggingClassifier
# from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.validation import (
check_X_y,
check_array,
NotFittedError,
)
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from .base import BaseVoter
from tensorflow import keras
from keras import layers
class TreeClassificationVoter(BaseVoter):
def __init__(self, finite_sample_correction=False):
"""
Initializes the voter; finite_sample_correction pushes low-data posteriors toward uniform.
"""
self.finite_sample_correction = finite_sample_correction
self._is_fitted = False
self.multilabel = False
def fit(self, X, y):
"""
Fits per-leaf class posteriors from transformed inputs X (leaf ids) and labels y.
"""
check_classification_targets(y)
if type_of_target(y) == 'multilabel-indicator':
# Fit multilabel binary task.
self.multilabel = True
return self.fit_multilabel(X, y)
num_classes = len(np.unique(y))
self.uniform_posterior = np.ones(num_classes) / num_classes
self.leaf_to_posterior = {}
for leaf_id in np.unique(X):
idxs_in_leaf = np.where(X == leaf_id)[0]
class_counts = [
len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)
]
posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))
if self.finite_sample_correction:
posteriors = self._finite_sample_correction(
posteriors, len(idxs_in_leaf), len(np.unique(y))
)
self.leaf_to_posterior[leaf_id] = posteriors
self._is_fitted = True
return self
def fit_multilabel(self, X, y):
num_labels = y.shape[1]
self.uniform_posterior = y.sum(axis=0) / len(y)
# Each posterior is now a num_labels size vector or binary probabilities.
self.leaf_to_posterior = {}
for leaf_id in np.unique(X):
idxs_in_leaf = np.where(X == leaf_id)[0]
label_counts = [
len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)
]
posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))
# TODO: multilabel finite sample correction.
self.leaf_to_posterior[leaf_id] = posteriors
self._is_fitted = True
return self
def vote(self, X):
"""
Returns the stored posterior for each example's leaf, or the uniform posterior for unseen leaves.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this voter."
)
raise NotFittedError(msg % {"name": type(self).__name__})
votes_per_example = []
for x in X:
if x in list(self.leaf_to_posterior.keys()):
votes_per_example.append(self.leaf_to_posterior[x])
else:
votes_per_example.append(self.uniform_posterior)
return np.array(votes_per_example)
def is_fitted(self):
"""
Returns True if this voter has been fitted.
"""
return self._is_fitted
def _finite_sample_correction(self, posteriors, num_points_in_partition, num_classes):
"""
encourage posteriors to approach uniform when there is low data
"""
correction_constant = 1 / (num_classes * num_points_in_partition)
zero_posterior_idxs = np.where(posteriors == 0)[0]
posteriors[zero_posterior_idxs] = correction_constant
posteriors /= sum(posteriors)
return posteriors
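# Example (illustrative sketch, not part of the original module): fitting the
# voter on transformer outputs, where X holds leaf ids and y holds labels.
#
#   import numpy as np
#   leaf_ids = np.array([0, 0, 1, 1, 2, 2])
#   labels = np.array([0, 0, 1, 1, 0, 1])
#   voter = TreeClassificationVoter().fit(leaf_ids, labels)
#   posteriors = voter.vote(np.array([0, 1, 5]))  # unseen leaf 5 -> uniform posterior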
class KNNClassificationVoter(BaseVoter):
def __init__(self, k, kwargs={}):
"""
Initializes a k-nearest-neighbors voter with k neighbors and optional sklearn keyword arguments.
"""
self._is_fitted = False
self.k = k
self.kwargs = kwargs
def fit(self, X, y):
"""
Fits the underlying KNeighborsClassifier on X and y.
"""
X, y = check_X_y(X, y)
self.knn = KNeighborsClassifier(self.k, **self.kwargs)
self.knn.fit(X, y)
self._is_fitted = True
return self
def vote(self, X):
"""
Returns class-probability estimates from the fitted KNN for each row of X.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this transformer."
)
raise NotFittedError(msg % {"name": type(self).__name__})
X = check_array(X)
return self.knn.predict_proba(X)
def is_fitted(self):
"""
Returns True if this voter has been fitted.
"""
return self._is_fitted
class NeuralRegressionVoter(BaseVoter):
def __init__(
self, validation_split=0.25, loss="mse", epochs=100, lr=1e-4, verbose=False,
):
"""
Initializes a single linear Keras layer and its training hyperparameters.
"""
self.validation_split = validation_split
self.loss = loss
self.epochs = epochs
self.lr = lr
self.verbose = verbose
self._is_fitted = False
def fit(self, X, y):
"""
Fits the linear Keras layer on X and y with early stopping.
"""
X, y = check_X_y(X, y)
self.voter = keras.Sequential()
self.voter.add(
layers.Dense(
1,
activation="linear",
input_shape=(X.shape[1],),
name="transform_to_vote",
)
)
self.voter.compile(
loss=self.loss, metrics=["mae"], optimizer=keras.optimizers.Adam(self.lr)
)
self.voter.fit(
X,
y,
epochs=self.epochs,
callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor="val_loss")],
verbose=self.verbose,
validation_split=self.validation_split,
shuffle=True,
)
self._is_fitted = True
return self
def vote(self, X):
"""
Returns the fitted network's predictions for X.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this transformer."
)
raise NotFittedError(msg % {"name": type(self).__name__})
X = check_array(X)
return self.voter.predict(X)
def is_fitted(self):
"""
Returns True if this voter has been fitted.
"""
return self._is_fitted
class TreeRegressionVoter(BaseVoter):
def __init__(self):
"""
Initializes the tree regression voter.
"""
self._is_fitted = False
def fit(self, X, y):
"""
Fits per-leaf mean targets from transformed inputs X (leaf ids) and targets y.
"""
self.leaf_to_yhat = {}
self.global_yhat = np.mean(y)
for leaf_id in np.unique(X):
idxs_in_leaf = np.where(X == leaf_id)[0]
# class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)]
self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf]))
self._is_fitted = True
return self
def vote(self, X):
"""
Returns the stored mean target for each example's leaf, or the global mean for unseen leaves.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this voter."
)
raise NotFittedError(msg % {"name": type(self).__name__})
votes_per_example = []
for x in X:
if x in list(self.leaf_to_yhat.keys()):
votes_per_example.append(self.leaf_to_yhat[x])
else:
votes_per_example.append(self.global_yhat)
return np.array(votes_per_example)
def is_fitted(self):
"""
Returns True if this voter has been fitted.
"""
return self._is_fitted | 26.805461 | 99 | 0.556277 | 7,412 | 0.943723 | 0 | 0 | 0 | 0 | 0 | 0 | 1,610 | 0.204991 |
b999024320e50c940c8f273e6f0536039450c829 | 1,949 | py | Python | config.py | jhattat/photoBooth | f6fe3ab418bb917792e10349597401ed34078766 | [
"MIT"
] | null | null | null | config.py | jhattat/photoBooth | f6fe3ab418bb917792e10349597401ed34078766 | [
"MIT"
] | null | null | null | config.py | jhattat/photoBooth | f6fe3ab418bb917792e10349597401ed34078766 | [
"MIT"
] | null | null | null | # Tumblr Setup
# Replace the values with your information
# OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info
consumer_key='ShbOqI5zErQXOL7Qnd5XduXpY9XQUlBgJDpCLeq1OYqnY2KzSt' #replace with your key
consumer_secret='ulZradkbJGksjpl2MMlshAfJgEW6TNeSdZucykqeTp8jvwgnhu' #replace with your secret code
oath_token='uUcBuvJx8yhk4HJIZ39sfcYo0W4VoqcvUetR2EwcI5Sn8SLgNt' #replace with your OAuth token
oath_secret='iNJlqQJI6dwhAGmdNbMtD9u7VazmX2Rk5uW0fuIozIEjk97lz4' #replace with your OAuth secret code
tumblr_blog = 'soniaetjeremie' # replace with your tumblr account name without .tumblr.com
tagsForTumblr = "photobooth" # change to tags you want, separated with commas
#Config settings to change behavior of photo booth
monitor_w = 800 # width of the display monitor
monitor_h = 480 # height of the display monitor
file_path = '/home/pi/photobooth/pics/' # path to save images
clear_on_startup = False # True will clear previously stored photos as the program launches. False will leave all previous photos.
debounce = 0.3 # how long to debounce the button. Add more time if the button triggers too many times.
post_online = True # True to upload images. False to store locally only.
capture_count_pics = True # if true, show a photo count between taking photos. If false, do not. False is faster.
make_gifs = True # True to make an animated gif. False to post 4 jpgs into one post.
hi_res_pics = False # True to save high res pics from camera.
# If also uploading, the program will also convert each image to a smaller image before making the gif.
# False to first capture low res pics. False is faster.
# Careful, each photo costs against your daily Tumblr upload max.
camera_iso = 400 # adjust for lighting issues. Normal is 100 or 200. Sort of dark is 400. Dark is 800 max.
# available options: 100, 200, 320, 400, 500, 640, 800 | 77.96 | 130 | 0.758338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,561 | 0.800924 |
b9991711cbe60fa3459b0fb4cb64d023132610e8 | 896 | py | Python | accounts/admin.py | GuilhemN/site-interludes | 69873810d5b0168aa57277ba51805117e6c53874 | [
"MIT"
] | null | null | null | accounts/admin.py | GuilhemN/site-interludes | 69873810d5b0168aa57277ba51805117e6c53874 | [
"MIT"
] | 1 | 2022-03-24T10:41:10.000Z | 2022-03-24T12:39:30.000Z | accounts/admin.py | GuilhemN/site-interludes | 69873810d5b0168aa57277ba51805117e6c53874 | [
"MIT"
] | 1 | 2022-03-23T22:30:12.000Z | 2022-03-23T22:30:12.000Z | from django.contrib import admin
from django.contrib.auth.models import Group
from accounts.models import EmailUser
from shared.admin import ExportCsvMixin
# no need for groups - we only have regular users and superusers
admin.site.unregister(Group)
@admin.register(EmailUser)
class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):
"""option d'affichage des activités dans la vue django admin"""
filename = "export_utilisateurs.csv"
list_display = ("email", "last_name", "first_name", "is_superuser", "is_active", "email_confirmed",)
list_filter = ("is_superuser","is_active", "email_confirmed",)
fields = ("email", "last_name", "first_name", "is_superuser", "is_staff", "is_active", "email_confirmed",
("date_joined", "last_login",),
)
ordering = ("last_name", "first_name")
readonly_fields = ("date_joined", "last_login",)
list_per_page = 200
csv_export_exclude = ["password"]
| 37.333333 | 106 | 0.753348 | 616 | 0.686734 | 0 | 0 | 643 | 0.716834 | 0 | 0 | 432 | 0.481605 |
b9993aa0d134cc4869bfe49fd1ecd6dc8c6b0b96 | 23,640 | py | Python | rotkehlchen/exchanges/coinbase.py | vnavascues/rotki | 8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f | [
"BSD-3-Clause"
] | null | null | null | rotkehlchen/exchanges/coinbase.py | vnavascues/rotki | 8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f | [
"BSD-3-Clause"
] | null | null | null | rotkehlchen/exchanges/coinbase.py | vnavascues/rotki | 8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f | [
"BSD-3-Clause"
] | null | null | null | import hashlib
import hmac
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import asset_from_coinbase
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset
from rotkehlchen.exchanges.data_structures import AssetMovement, Trade
from rotkehlchen.exchanges.exchange import ExchangeInterface
from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_asset_amount_force_positive,
deserialize_asset_movement_category,
deserialize_fee,
deserialize_timestamp_from_date,
deserialize_trade_type,
)
from rotkehlchen.typing import (
ApiKey,
ApiSecret,
AssetMovementCategory,
Fee,
Location,
Price,
Timestamp,
TradePair,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock
from rotkehlchen.utils.serialization import rlk_jsonloads_dict
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]:
"""Turns a coinbase transaction into a rotkehlchen Trade.
https://developers.coinbase.com/api/v2?python#buys
If the coinbase transaction is not a trade related transaction returns None
Throws:
- UnknownAsset due to Asset instantiation
- DeserializationError due to unexpected format of dict entries
- KeyError due to dict entires missing an expected entry
"""
if raw_trade['status'] != 'completed':
# We only want to deal with completed trades
return None
if raw_trade['instant']:
raw_time = raw_trade['created_at']
else:
raw_time = raw_trade['payout_at']
timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase')
trade_type = deserialize_trade_type(raw_trade['resource'])
tx_amount = deserialize_asset_amount(raw_trade['amount']['amount'])
tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp)
native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount'])
native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp)
# in coinbase you are buying/selling tx_asset for native_asset
pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}')
amount = tx_amount
# The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency
rate = Price(native_amount / tx_amount)
fee_amount = deserialize_fee(raw_trade['fee']['amount'])
fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp)
return Trade(
timestamp=timestamp,
location=Location.COINBASE,
pair=pair,
trade_type=trade_type,
amount=amount,
rate=rate,
fee=fee_amount,
fee_currency=fee_asset,
link=str(raw_trade['id']),
)
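# Example (illustrative sketch, not real Coinbase data): a completed "buy"
# contains at least the fields read above; the values below are made up.
#
#   raw_trade = {
#       'status': 'completed', 'instant': False, 'resource': 'buy',
#       'created_at': '2019-08-25T12:00:00Z', 'payout_at': '2019-08-30T12:00:00Z',
#       'amount': {'amount': '0.5', 'currency': 'BTC'},
#       'subtotal': {'amount': '5000.00', 'currency': 'USD'},
#       'fee': {'amount': '15.00', 'currency': 'USD'},
#       'id': 'txn-id',
#   }
#   trade = trade_from_coinbase(raw_trade)  # pair 'BTC_USD', rate 10000, fee 15 USD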
class CoinbasePermissionError(Exception):
pass
class Coinbase(ExchangeInterface):
def __init__(
self,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
):
super(Coinbase, self).__init__('coinbase', api_key, secret, database)
self.apiversion = 'v2'
self.base_uri = 'https://api.coinbase.com'
self.msg_aggregator = msg_aggregator
def first_connection(self) -> None:
self.first_connection_made = True
def _validate_single_api_key_action(
self,
method_str: str,
ignore_pagination: bool = False,
) -> Tuple[Optional[List[Any]], str]:
try:
result = self._api_query(method_str, ignore_pagination=ignore_pagination)
except CoinbasePermissionError as e:
error = str(e)
if 'transactions' in method_str:
permission = 'wallet:transactions:read'
elif 'buys' in method_str:
permission = 'wallet:buys:read'
elif 'sells' in method_str:
permission = 'wallet:sells:read'
elif 'deposits' in method_str:
permission = 'wallet:deposits:read'
elif 'withdrawals' in method_str:
permission = 'wallet:withdrawals:read'
elif 'trades' in method_str:
permission = 'wallet:trades:read'
# the accounts elif should be at the end since the word appears
# in other endpoints
elif 'accounts' in method_str:
permission = 'wallet:accounts:read'
else:
raise AssertionError(
f'Unexpected coinbase method {method_str} at API key validation',
)
msg = (
f'Provided Coinbase API key needs to have {permission} permission activated. '
f'Please log into your coinbase account and set all required permissions: '
f'wallet:accounts:read, wallet:transactions:read, '
f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, '
f'wallet:deposits:read, wallet:trades:read'
)
return None, msg
except RemoteError as e:
error = str(e)
if 'invalid signature' in error:
return None, 'Failed to authenticate with the Provided API key/secret'
elif 'invalid api key' in error:
return None, 'Provided API Key is invalid'
else:
# any other remote error
return None, error
return result, ''
def validate_api_key(self) -> Tuple[bool, str]:
"""Validates that the Coinbase API key is good for usage in Rotki
Makes sure that the following permissions are given to the key:
wallet:accounts:read, wallet:transactions:read,
wallet:buys:read, wallet:sells:read, wallet:withdrawals:read,
wallet:deposits:read
"""
result, msg = self._validate_single_api_key_action('accounts')
if result is None:
return False, msg
# now get the account ids
account_ids = self._get_account_ids(result)
if len(account_ids) != 0:
# and now try to get all transactions of an account to see if that's possible
method = f'accounts/{account_ids[0]}/transactions'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all buys of an account to see if that's possible
method = f'accounts/{account_ids[0]}/buys'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all sells of an account to see if that's possible
method = f'accounts/{account_ids[0]}/sells'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all deposits of an account to see if that's possible
method = f'accounts/{account_ids[0]}/deposits'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all withdrawals of an account to see if that's possible
method = f'accounts/{account_ids[0]}/withdrawals'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
return True, ''
def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]:
"""Gets the account ids out of the accounts response"""
account_ids = []
for account_data in accounts:
if 'id' not in account_data:
self.msg_aggregator.add_error(
'Found coinbase account entry without an id key. Skipping it. ',
)
continue
if not isinstance(account_data['id'], str):
self.msg_aggregator.add_error(
f'Found coinbase account entry with a non string id: '
f'{account_data["id"]}. Skipping it. ',
)
continue
account_ids.append(account_data['id'])
return account_ids
def _api_query(
self,
endpoint: str,
options: Optional[Dict[str, Any]] = None,
pagination_next_uri: str = None,
ignore_pagination: bool = False,
) -> List[Any]:
"""Performs a coinbase API Query for endpoint
You can optionally provide extra arguments to the endpoint via the options argument.
If this is an ongoing paginating call then provide pagination_next_uri.
If you want just the first results then set ignore_pagination to True.
"""
request_verb = "GET"
if pagination_next_uri:
request_url = pagination_next_uri
else:
request_url = f'/{self.apiversion}/{endpoint}'
if options:
request_url += '?' + urlencode(options)
timestamp = str(int(time.time()))
message = timestamp + request_verb + request_url
signature = hmac.new(
self.secret,
message.encode(),
hashlib.sha256,
).hexdigest()
log.debug('Coinbase API query', request_url=request_url)
self.session.headers.update({
'CB-ACCESS-SIGN': signature,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-KEY': self.api_key,
# This is needed to guarantee the up to the given date
# API version response.
'CB-VERSION': '2019-08-25',
})
full_url = self.base_uri + request_url
try:
response = self.session.get(full_url)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Coinbase API request failed due to {str(e)}')
if response.status_code == 403:
raise CoinbasePermissionError(f'API key does not have permission for {endpoint}')
if response.status_code != 200:
raise RemoteError(
f'Coinbase query {full_url} responded with error status code: '
f'{response.status_code} and text: {response.text}',
)
try:
json_ret = rlk_jsonloads_dict(response.text)
except JSONDecodeError:
raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}')
if 'data' not in json_ret:
raise RemoteError(f'Coinbase json response does not contain data: {response.text}')
final_data = json_ret['data']
# If we got pagination and this is the first query, gather all the subsequent queries
if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination:
if 'next_uri' not in json_ret['pagination']:
raise RemoteError('Coinbase json response contained no "next_uri" key')
next_uri = json_ret['pagination']['next_uri']
if not next_uri:
# As per the docs: https://developers.coinbase.com/api/v2?python#pagination
# once we get an empty next_uri we are done
return final_data
additional_data = self._api_query(
endpoint=endpoint,
options=options,
pagination_next_uri=next_uri,
)
final_data.extend(additional_data)
return final_data
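# Example (sketch): from inside this class one could fetch only the first page
# of accounts, leaving any further pagination to the caller.
#
#   accounts = self._api_query('accounts', ignore_pagination=True)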
@protect_with_lock()
@cache_response_timewise()
def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]:
try:
resp = self._api_query('accounts')
except RemoteError as e:
msg = (
'Coinbase API request failed. Could not reach coinbase due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
returned_balances: Dict[Asset, Dict[str, Any]] = {}
for account in resp:
try:
if not account['balance']:
continue
amount = deserialize_asset_amount(account['balance']['amount'])
# ignore empty balances. Coinbase returns zero balances for everything
# a user does not own
if amount == ZERO:
continue
asset = asset_from_coinbase(account['balance']['currency'])
try:
usd_price = Inquirer().find_usd_price(asset=asset)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing coinbase balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
if asset in returned_balances:
amount = returned_balances[asset]['amount'] + amount
else:
returned_balances[asset] = {}
returned_balances[asset]['amount'] = amount
usd_value = returned_balances[asset]['amount'] * usd_price
returned_balances[asset]['usd_value'] = usd_value
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase balance result with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase balance result with unsupported asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing a coinbase account balance. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing a coinbase account balance',
account_balance=account,
error=msg,
)
continue
return returned_balances, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[Trade]:
account_data = self._api_query('accounts')
# now get the account ids and for each one query buys/sells
# Looking at coinbase's API no other type of transaction
# https://developers.coinbase.com/api/v2?python#list-transactions
# consitutes something that Rotkehlchen would need to return in query_trade_history
account_ids = self._get_account_ids(account_data)
raw_data = []
for account_id in account_ids:
raw_data.extend(self._api_query(f'accounts/{account_id}/buys'))
raw_data.extend(self._api_query(f'accounts/{account_id}/sells'))
log.debug('coinbase buys/sells history result', results_num=len(raw_data))
trades = []
for raw_trade in raw_data:
try:
trade = trade_from_coinbase(raw_trade)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase transaction with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase trade with unsupported asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing a coinbase trade. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing a coinbase trade',
trade=raw_trade,
error=msg,
)
continue
# limit coinbase trades in the requested time range here since there
# is no argument in the API call
if trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts:
trades.append(trade)
return trades
def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]:
"""Processes a single deposit/withdrawal from coinbase and deserializes it
Can log error/warning and return None if something went wrong at deserialization
"""
try:
if raw_data['status'] != 'completed':
return None
payout_date = raw_data.get('payout_at', None)
if payout_date:
timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase')
else:
timestamp = deserialize_timestamp_from_date(
raw_data['created_at'],
'iso8601',
'coinbase',
)
# Only get address/transaction id for "send" type of transactions
address = None
transaction_id = None
# movement_category: Union[Literal['deposit'], Literal['withdrawal']]
if 'type' in raw_data:
# Then this should be a "send" which is the way Coinbase uses to send
# crypto outside of the exchange
# https://developers.coinbase.com/api/v2?python#transaction-resource
msg = 'Non "send" type found in coinbase deposit/withdrawal processing'
assert raw_data['type'] == 'send', msg
movement_category = AssetMovementCategory.WITHDRAWAL
# Can't see the fee being charged from the "send" resource
amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])
asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)
# Fees dont appear in the docs but from an experiment of sending ETH
# to an address from coinbase there is the network fee in the response
fee = Fee(ZERO)
raw_network = raw_data.get('network', None)
if raw_network:
raw_fee = raw_network.get('transaction_fee', None)
if raw_fee:
# Since this is a withdrawal the fee should be the same as the moved asset
if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp):
# If not we set ZERO fee and ignore
log.error(
f'In a coinbase withdrawal of {asset.identifier} the fee'
f'is denoted in {raw_fee["currency"]}',
)
else:
fee = deserialize_fee(raw_fee['amount'])
if 'network' in raw_data:
transaction_id = get_key_if_has_val(raw_data['network'], 'hash')
if 'to' in raw_data:
address = deserialize_asset_movement_address(raw_data['to'], 'address', asset)
else:
movement_category = deserialize_asset_movement_category(raw_data['resource'])
amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])
fee = deserialize_fee(raw_data['fee']['amount'])
asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)
return AssetMovement(
location=Location.COINBASE,
category=movement_category,
address=address,
transaction_id=transaction_id,
timestamp=timestamp,
asset=asset,
amount=amount,
fee_asset=asset,
fee=fee,
link=str(raw_data['id']),
)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase deposit/withdrawal with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase deposit/withdrawal with unsupported asset '
f'{e.asset_name}. Ignoring it.',
)
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Unexpected data encountered during deserialization of a coinbase '
'asset movement. Check logs for details and open a bug report.',
)
log.error(
f'Unexpected data encountered during deserialization of coinbase '
f'asset_movement {raw_data}. Error was: {str(e)}',
)
return None
def query_online_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[AssetMovement]:
account_data = self._api_query('accounts')
account_ids = self._get_account_ids(account_data)
raw_data = []
for account_id in account_ids:
raw_data.extend(self._api_query(f'accounts/{account_id}/deposits'))
raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals'))
# also get transactions to get the "sends", which in Coinbase is the
# way to send Crypto out of the exchange
txs = self._api_query(f'accounts/{account_id}/transactions')
for tx in txs:
if 'type' not in tx:
continue
if tx['type'] == 'send':
raw_data.append(tx)
log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data))
movements = []
for raw_movement in raw_data:
movement = self._deserialize_asset_movement(raw_movement)
# limit coinbase deposit/withdrawals in the requested time range
# here since there is no argument in the API call
if movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts:
movements.append(movement)
return movements
| 40.688468 | 98 | 0.592047 | 20,182 | 0.853723 | 0 | 0 | 2,906 | 0.122927 | 0 | 0 | 7,542 | 0.319036 |
b9994eb6b47f29e07dc9f474ab82878fdc8ae029 | 3,533 | py | Python | lib/python3.7/site-packages/ldap/controls/deref.py | aonrobot/MSC-thug-auth-provider | aef37ef5a000586b8502cc536244f31e08b9c2db | [
"Apache-2.0"
] | 1 | 2019-06-21T11:51:26.000Z | 2019-06-21T11:51:26.000Z | lib/python3.7/site-packages/ldap/controls/deref.py | aonrobot/MSC-thug-auth-provider | aef37ef5a000586b8502cc536244f31e08b9c2db | [
"Apache-2.0"
] | 13 | 2019-07-03T21:28:31.000Z | 2022-02-26T10:42:05.000Z | lib/python3.7/site-packages/ldap/controls/deref.py | aonrobot/MSC-thug-auth-provider | aef37ef5a000586b8502cc536244f31e08b9c2db | [
"Apache-2.0"
] | 2 | 2020-02-11T09:34:39.000Z | 2020-11-10T14:41:32.000Z | # -*- coding: utf-8 -*-
"""
ldap.controls.deref - classes for
(see https://tools.ietf.org/html/draft-masarati-ldap-deref)
See https://www.python-ldap.org/ for project details.
"""
__all__ = [
'DEREF_CONTROL_OID',
'DereferenceControl',
]
import ldap.controls
from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS
import pyasn1_modules.rfc2251
from pyasn1.type import namedtype,univ,tag
from pyasn1.codec.ber import encoder,decoder
from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue
DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'
# Request types
#---------------------------------------------------------------------------
# For compatibility with ASN.1 declaration in I-D
AttributeList = AttributeDescriptionList
class DerefSpec(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'derefAttr',
AttributeDescription()
),
namedtype.NamedType(
'attributes',
AttributeList()
),
)
class DerefSpecs(univ.SequenceOf):
componentType = DerefSpec()
# Response types
#---------------------------------------------------------------------------
class AttributeValues(univ.SetOf):
componentType = AttributeValue()
class PartialAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', AttributeValues()),
)
class PartialAttributeList(univ.SequenceOf):
componentType = PartialAttribute()
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
class DerefRes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('derefAttr', AttributeDescription()),
namedtype.NamedType('derefVal', LDAPDN()),
namedtype.OptionalNamedType('attrVals', PartialAttributeList()),
)
class DerefResultControlValue(univ.SequenceOf):
componentType = DerefRes()
class DereferenceControl(LDAPControl):
controlType = DEREF_CONTROL_OID
def __init__(self,criticality=False,derefSpecs=None):
LDAPControl.__init__(self,self.controlType,criticality)
self.derefSpecs = derefSpecs or {}
def _derefSpecs(self):
deref_specs = DerefSpecs()
i = 0
for deref_attr,deref_attribute_names in self.derefSpecs.items():
deref_spec = DerefSpec()
deref_attributes = AttributeList()
for j in range(len(deref_attribute_names)):
deref_attributes.setComponentByPosition(j,deref_attribute_names[j])
deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr))
deref_spec.setComponentByName('attributes',deref_attributes)
deref_specs.setComponentByPosition(i,deref_spec)
i += 1
return deref_specs
def encodeControlValue(self):
return encoder.encode(self._derefSpecs())
def decodeControlValue(self,encodedControlValue):
decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue())
self.derefRes = {}
for deref_res in decodedValue:
deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2]
partial_attrs_dict = {
str(tv[0]): [str(v) for v in tv[1]]
for tv in deref_vals or []
}
try:
self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict))
except KeyError:
self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)]
KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl
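# Example (illustrative sketch): requesting dereferencing of the "member"
# attribute during a search; the server URI, base DN and attribute names are
# placeholders.
#
#   import ldap
#   conn = ldap.initialize('ldap://localhost')
#   ctrl = DereferenceControl(True, derefSpecs={'member': ['uid', 'cn']})
#   msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
#                           '(objectClass=groupOfNames)', serverctrls=[ctrl])
#   rtype, rdata, rmsgid, resp_ctrls = conn.result3(msgid)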
| 29.441667 | 102 | 0.711577 | 2,552 | 0.722332 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.160204 |
b999aec7c34874ef90e0f30812ac97217ce90cca | 3,145 | py | Python | emoji.py | notagoat/Deepmoji | 1ab922306c3647f9c7ea98caa2660a53b18fe4b6 | [
"MIT"
] | 1 | 2020-03-19T20:09:00.000Z | 2020-03-19T20:09:00.000Z | emoji.py | notagoat/Deepmoji | 1ab922306c3647f9c7ea98caa2660a53b18fe4b6 | [
"MIT"
] | null | null | null | emoji.py | notagoat/Deepmoji | 1ab922306c3647f9c7ea98caa2660a53b18fe4b6 | [
"MIT"
] | null | null | null | import requests
import urllib.request
import os.path
import shutil
import csv
def main():
with open("data.csv") as i: #Open the data.csv file
instances = i.readlines() #Write them into memory
instances = [x.strip() for x in instances] #Strip any weird issues from writing
instances.sort() #Sort them alphabetically
setup(instances) #Run setup to create all the necessary files and subfolders
count = len(instances) #Get the count just for fun
i = 0
try:
for name in instances:
try:
i += 1
print("-----!"+name+"!-----")
print(str(i) +" of " + str(count) + " remaining!")
fetch(name) #Run the fetching code
except Exception as e:
print(e) #Print the error. We catch errors here for pleroma instances, weirdly encoded urls, etc
pass #Don't stop the beat
except Exception as e:
print("Instance Error")
print(e)
pass
clone(instances) #Clone all of them into one big folder for ease of access
def fetch(name):
r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard url for fetching data
path = "emoji/%s/" % name #Because of the clone function we know all of these folders will exist
try:
for emoji in r.json(): #Emoji = the json code from the request
try:
if os.path.isfile(path+emoji['shortcode']+".png"): #Check to see if it exists.
pass
else:
if "ms_" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most of them). #Mutant standard is huge and common
#print(emoji['shortcode'] + " found!")
emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json
open(path + emoji['shortcode']+".png",'wb').write(emojiimage.content) #Now save it as an image in the filesystem
except Exception as e:
print("Did not get: " + emoji['url']) #If somethings fucky throw a nice error then keep going.
print(e)
pass
except Exception as e:
print(e)
def setup(instances):
if (os.path.isdir("emoji/")): #Check to see if emoji/ exists
pass
else:
os.mkdir("emoji/") #make it if it doesnt
for name in instances:
if (os.path.isdir("emoji/%s/"%name)):
pass
else: os.mkdir("emoji/%s/"%name)
if (os.path.isdir("emoji/all")):
pass
else:
os.mkdir("emoji/all")
def clone(instances):
for name in instances:
print("Copying emoji for: %s"% name)
path = "emoji/%s/" % name
files = os.listdir(path)
for name in files: #This gets alll files
try:
shutil.copyfile(path+name,"emoji/all/"+name) #Then copies them into the all folder
except Exception as e:
print(e)
pass
if __name__ == '__main__':
main()
| 37.440476 | 151 | 0.574245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,256 | 0.399364 |
b99add86778172fa08bc930ed29f8f26a88ec4d3 | 943 | py | Python | String/640.One Edit Distance/Solution_DP.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 12 | 2019-05-04T04:21:27.000Z | 2022-03-02T07:06:57.000Z | String/640.One Edit Distance/Solution_DP.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 1 | 2019-07-24T18:43:53.000Z | 2019-07-24T18:43:53.000Z | String/640.One Edit Distance/Solution_DP.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 10 | 2019-07-01T04:03:04.000Z | 2022-03-09T03:57:37.000Z | class Solution:
"""
@param s: a string
@param t: a string
@return: true if they are both one edit distance apart or false
"""
def isOneEditDistance(self, s, t):
# write your code here
if s == t:
return False
if abs(len(s) - len(t)) > 1:
return False
n, m = len(s), len(t)
f = [[0] * (m + 1) for _ in range(2)]
for j in range(m + 1):
f[0][j] = j
for i in range(1, n + 1):
f[i % 2][0] = i
for j in range(1, m + 1):
if s[i - 1] == t[j - 1]:
f[i % 2][j] = min(f[(i - 1) % 2][j - 1],
f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
else:
f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1,
f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
return f[n % 2][m] == 1
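# Example (sketch):
#   Solution().isOneEditDistance("abc", "abx")    # True  (one substitution)
#   Solution().isOneEditDistance("abc", "abc")    # False (identical strings)
#   Solution().isOneEditDistance("abc", "abcde")  # False (two insertions apart)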
| 29.46875 | 81 | 0.341463 | 942 | 0.99894 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.155885 |
b99b1d1ec6004cbeeb91e19410dbbb1e2216c45e | 1,478 | py | Python | nsq/__init__.py | jehiah/pynsq | 899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96 | [
"MIT"
] | 1 | 2015-05-25T00:23:53.000Z | 2015-05-25T00:23:53.000Z | nsq/__init__.py | barkinet/pynsq | 899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96 | [
"MIT"
] | null | null | null | nsq/__init__.py | barkinet/pynsq | 899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import signal
import tornado.ioloop
import logging
from .protocol import (
Error,
unpack_response,
decode_message,
valid_topic_name,
valid_channel_name,
identify,
subscribe,
ready,
finish,
touch,
requeue,
nop,
pub,
mpub,
FRAME_TYPE_RESPONSE,
FRAME_TYPE_ERROR,
FRAME_TYPE_MESSAGE,
)
from .message import Message
from .backoff_timer import BackoffTimer
from .sync import SyncConn
from .async import AsyncConn
from .reader import Reader
from .legacy_reader import LegacyReader
from .writer import Writer
from .version import __version__ # NOQA
def _handle_term_signal(sig_num, frame):
logging.getLogger(__name__).info(
'TERM Signal handler called with signal %r', sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
"""
Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`
"""
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
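# Example (hedged sketch): a minimal consumer wired up before calling run().
# The topic, channel and lookupd address below are placeholders.
#
#   def handler(message):
#       print(message.body)
#       return True
#
#   Reader(message_handler=handler,
#          lookupd_http_addresses=['http://127.0.0.1:4161'],
#          topic='test_topic', channel='test_channel', max_in_flight=9)
#   run()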
__author__ = "Matt Reiferson <[email protected]>"
__all__ = ["Reader", "Writer", "run", "BackoffTimer", "Message", "Error", "LegacyReader",
"SyncConn", "AsyncConn", "unpack_response", "decode_message",
"identify", "subscribe", "ready", "finish", "touch", "requeue", "nop", "pub", "mpub",
"valid_topic_name", "valid_channel_name",
"FRAME_TYPE_RESPONSE", "FRAME_TYPE_ERROR", "FRAME_TYPE_MESSAGE"]
| 26.392857 | 96 | 0.696211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.304465 |
b99b2da4f2ac2ca37d2ded7c72545cef1cab4228 | 5,356 | py | Python | scripts/summaryPlot.py | Hespian/ParFastKer | 5ddf1685c0652e73c889cfc64c7ec1fd827f905c | [
"BSD-3-Clause",
"MIT"
] | 3 | 2019-08-10T08:24:19.000Z | 2019-08-12T07:16:03.000Z | scripts/summaryPlot.py | Hespian/ParFastKer | 5ddf1685c0652e73c889cfc64c7ec1fd827f905c | [
"BSD-3-Clause",
"MIT"
] | null | null | null | scripts/summaryPlot.py | Hespian/ParFastKer | 5ddf1685c0652e73c889cfc64c7ec1fd827f905c | [
"BSD-3-Clause",
"MIT"
] | null | null | null | import get_data_ours
import get_data_akiba
import get_data_NearLinear
import get_data_LinearTime
import os
import matplotlib.pyplot as plt
# graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "RHG-100000000-nodes-2000000000-edges", "delaunay_n24", "del26"]
graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "delaunay_n24", "del26"]
linearTimeDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs"
partitioningDir = "../../LinearTimeKernels/partitions"
ourTimeDir = "../../results/LinearTimeKernelsScalingAll"
nearLinearDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear"
akibaDir = "../../akiba_vertex_cover/results"
def getOurTimeAndSizeSequential(graph):
res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
result = dict()
result["time"] = res["sequential_quasikernel_time"] + res["lineartime_time"]
result["size"] = res["sequential_quasikernel_size"]
return result
def getOurTimeAndSizeParallel(graph):
res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
result = dict()
result["time"] = res["parallel_quasikernel_time"] + res["lineartime_time"] + res["partitioning_time"]
result["size"] = res["parallel_quasikernel_size"]
return result
def getAkibaTimeAndSize(graph):
return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)
def getNearLinearTimeAndSize(graph):
return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)
def getLinearTimeTimeAndSize(graph):
return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)
def minProperty(graph, prop):
oursequential = getOurTimeAndSizeSequential(graph)[prop]
ourparallel = getOurTimeAndSizeParallel(graph)[prop]
akiba = getAkibaTimeAndSize(graph)[prop]
nearLinear = getNearLinearTimeAndSize(graph)[prop]
linearTime = getLinearTimeTimeAndSize(graph)[prop]
data = [oursequential, ourparallel, akiba, nearLinear, linearTime]
# data = [oursequential, ourparallel, akiba, nearLinear]
data = filter(lambda x : x >= 0, data)
minimum = min(data)
if minimum == 0:
return 1
return minimum
oursizeSequential = []
ourtimeSequential = []
oursizeParallel = []
ourtimeParallel = []
akibasize = []
akibatime = []
nearlinearsize = []
nearlineartime = []
lineartimesize = []
lineartimetime = []
for graph in graphs:
minsize = getAkibaTimeAndSize(graph)["size"]
mintime = getAkibaTimeAndSize(graph)["time"]
oss = getOurTimeAndSizeSequential(graph)["size"] / minsize
# print(graph + "(sequential): " + str(getOurTimeAndSizeSequential(graph)["size"]))
ots = getOurTimeAndSizeSequential(graph)["time"] / mintime
if oss > 0 and ots > 0:
oursizeSequential.append(oss)
ourtimeSequential.append(ots)
osp = getOurTimeAndSizeParallel(graph)["size"] / minsize
# print(graph + "(parallel): " + str(getOurTimeAndSizeParallel(graph)["size"]))
otp = getOurTimeAndSizeParallel(graph)["time"] / mintime
if osp > 0 and otp > 0:
oursizeParallel.append(osp)
ourtimeParallel.append(otp)
aks = getAkibaTimeAndSize(graph)["size"] / minsize
akt = getAkibaTimeAndSize(graph)["time"] / mintime
if aks > 0 and akt > 0:
akibasize.append(aks)
akibatime.append(akt)
nls = getNearLinearTimeAndSize(graph)["size"] / minsize
nlt = getNearLinearTimeAndSize(graph)["time"] / mintime
if nls > 0 and nlt > 0:
nearlinearsize.append(nls)
nearlineartime.append(nlt)
lts = getLinearTimeTimeAndSize(graph)["size"] / minsize
ltt = getLinearTimeTimeAndSize(graph)["time"] / mintime
if nls > 0 and nlt > 0:
lineartimesize.append(lts)
lineartimetime.append(ltt)
# print("We")
# print(oursizeSequential)
# print(ourtimeSequential)
# print("We (parallel)")
# print(oursizeParallel)
# print(ourtimeParallel)
# print("Akiba")
# print(akibasize)
# print(akibatime)
# print("NearLinear")
# print(nearlinearsize)
# print(nearlineartime)
# print("LinearTime")
# print(lineartimesize)
# print(lineartimetime)
plt.rc('font', size=14)
fig = plt.figure(figsize=(3.2, 2.4))
ax = fig.add_subplot(1,1,1)
plt.title("Summary", fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
ax.scatter(ourtimeSequential, oursizeSequential, label="FastKer", marker="x", color="green")
ax.scatter(ourtimeParallel, oursizeParallel, label="ParFastKer", marker="+", color="black")
# ax.scatter(akibatime, akibasize, label="VCSolver", marker="^", edgecolors="blue", facecolors="none")
ax.scatter(nearlineartime, nearlinearsize, label="NearLinear", marker="o", edgecolors="red", facecolors="none")
ax.scatter(lineartimetime, lineartimesize, label="LinearTime", marker="^", edgecolors="magenta", facecolors="none")
plt.xlabel("time / VCSolver time")
plt.ylabel("size / VCSolver size")
plt.xticks([0.0001, 0.01, 1])
ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode="expand")
plt.savefig("summaryplot_vcsolver_baseline.pdf", bbox_inches="tight")
# plt.show()
| 39.094891 | 234 | 0.720127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,795 | 0.335138 |
b99c2305beceab596bedee8ad399b6faa3216070 | 3,587 | py | Python | bouncer/cli/base.py | lrnt/git-bouncer | 3015e11a5d2c90986124de73bf1fd0f5a8563360 | [
"MIT"
] | null | null | null | bouncer/cli/base.py | lrnt/git-bouncer | 3015e11a5d2c90986124de73bf1fd0f5a8563360 | [
"MIT"
] | null | null | null | bouncer/cli/base.py | lrnt/git-bouncer | 3015e11a5d2c90986124de73bf1fd0f5a8563360 | [
"MIT"
] | null | null | null | import configparser
import sys
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter
def opt(*args, **kwargs):
def decorator(method):
if not hasattr(method, 'options'):
method.options = []
method.options.append((args, kwargs))
return method
return decorator
def noopts(method):
method.options = []
return method
class HelpMixin(object):
def help(self):
print('available commands:')
for name, command in self.commands.items():
description = str(command.__doc__ or '').strip('\n')
print(' ', name.ljust(10), description)
return 1
class SubParser(HelpMixin):
def __init__(self, commands):
self.commands = self._commands(commands)
def _commands(self, commands):
prog = sys.argv[0]
result = {}
for cmd in commands:
name = getattr(cmd, '_name', None)
if not name:
continue
cmd.prog = prog
result[name] = cmd
return result
def run(self):
args = sys.argv[1:]
for index, arg in enumerate(args):
if arg in self.commands.keys():
args.pop(index)
return self.commands[arg](args)
return self.help()
class Command(HelpMixin):
def __init__(self):
self.global_options = []
self.commands = self._methods_with_opts()
def _methods_with_opts(self):
result = {}
for name in dir(self):
if name.startswith('__'):
continue
method = getattr(self, name)
if not hasattr(method, 'options'):
continue
result[name] = method
return result
def _parse_args(self, method, args):
prog = '{} {} {}'.format(self.prog, self._name, method.__name__)
parser = ArgumentParser(
prog=prog,
description=(method.__doc__ or ''),
formatter_class=RawDescriptionHelpFormatter
)
for opt in method.options + self.global_options:
parser.add_argument(*opt[0], **opt[1])
return vars(parser.parse_args(args))
def _call_method(self, method, args):
# Find out which arguments the method expects
expected_args, _, _, _ = inspect.getargspec(method)
expected_args.remove('self')
self_args = self._parse_args(method, args)
method_args = {}
# Get the expected method arguments, ignore rest
for name in expected_args:
            if name in self_args:
                method_args[name] = self_args.pop(name)
# Put rest of the arguments in self
for name, value in self_args.items():
setattr(self, name, value)
self.pre_command()
return method(**method_args)
def __call__(self, args):
for index, arg in enumerate(args):
if arg in self.commands.keys():
args.pop(index)
return self._call_method(self.commands[arg], args)
return self.help()
def opt(self, *args, **kwargs):
self.global_options.append((args, kwargs))
def pre_command(self):
pass
class BaseCommand(Command):
def __init__(self):
super(BaseCommand, self).__init__()
self.opt(
'-c', dest='config_path', help='Configuration file',
default='~/.test.conf'
)
def pre_command(self):
config = configparser.ConfigParser()
config.read(self.config_path)
print(config.sections())
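# A minimal usage sketch of this framework (the "greet" command and its option are
# hypothetical, not part of the package): decorate sub-command methods with @opt,
# give the class a _name, and let SubParser dispatch on sys.argv, e.g.
# `python base.py greet hello --name Alice`.
class GreetCommand(BaseCommand):
    _name = 'greet'
    @opt('--name', dest='name', default='world', help='who to greet')
    def hello(self, name):
        """Print a greeting."""
        print('hello, {}'.format(name))
if __name__ == '__main__':
    SubParser([GreetCommand()]).run()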
| 27.381679 | 72 | 0.578199 | 3,180 | 0.886535 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.071648 |
b99c4d9fb380e0635cac67dff2a6820b500bf34f | 13,728 | py | Python | Examples/ExampleCodes_ssccoorriinngg.py | MahdadJafarzadeh/ssccoorriinngg | 63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3 | [
"MIT"
] | 2 | 2020-04-28T12:50:26.000Z | 2020-05-13T08:52:42.000Z | Examples/ExampleCodes_ssccoorriinngg.py | MahdadJafarzadeh/ssccoorriinngg | 63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3 | [
"MIT"
] | null | null | null | Examples/ExampleCodes_ssccoorriinngg.py | MahdadJafarzadeh/ssccoorriinngg | 63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3 | [
"MIT"
] | 1 | 2020-07-14T13:48:56.000Z | 2020-07-14T13:48:56.000Z | #%% Import libs
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score
import h5py
import time
from ssccoorriinngg import ssccoorriinngg
import numpy as np
from sklearn.model_selection import cross_validate
#%% Picking featureset of interest and apply classification
Object = ssccoorriinngg(filename='', channel='', fs = 200, T = 30)
path = 'C:/PhD/ML in depression/'
fname = 'feat42_Fp1-Fp2_train'
feats = 'featureset'
labels = 'labels'
# Train set
X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels)
# Test set
fname = 'feat42_Fp1-Fp2_test'
X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels)
# Define the scoring criteria:
scoring = {'accuracy' : make_scorer(accuracy_score),
'precision' : make_scorer(precision_score),
'recall' : make_scorer(recall_score),
'f1_score' : make_scorer(f1_score)}
# Cross-validation using logistic Random Forests
y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF)
# Cross-validation using XGBoost
y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000,
cv = 10 , max_depth=3, learning_rate=.1)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb)
#%% Outcome measures
# Defien required metrics here:
Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score']
for metric in Metrics:
#RF
r1 = results_RF[metric].mean()
std1 = results_RF[metric].std()
print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}')
# xgb
r2 = results_xgb[metric].mean()
std2 = results_xgb[metric].std()
print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}')
# SVM
r3 = results_SVM[metric].mean()
std3 = results_SVM[metric].std()
print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}')
# LR
r4 = results_LR[metric].mean()
std4 = results_LR[metric].std()
print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}')
#%% Applying Randomized grid search to find the best config. of RF
BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y,
estimator = RandomForestClassifier(), scoring = scoring,
n_estimators = [int(x) for x in np.arange(10, 500, 20)],
max_features = ['log2', 'sqrt'],
max_depth = [int(x) for x in np.arange(10, 100, 30)],
min_samples_split = [2, 5, 10],
min_samples_leaf = [1, 2, 4],
bootstrap = [True, False],
n_iter = 100, cv = 10)
#%% Test feature selection methods ##
# PCA
PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5)
# Boruta
ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7)
# Lasso
Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1)
#ANOVA
Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80)
#Recruisive
ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20)
#### NOW TEST CLASSIFIERS WITH SELECTED FEATS
results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv = 10)
#%% Example save featureset
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3')
#%% Example load features:
X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/',
fname = 'feat42_N3_fp2-M1',
feats = 'featureset',
labels = 'labels')
#%% Combining some REM and SWS epochs
Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',
ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1',
REM_fname = 'tr90_fp1-M2_fp2-M1',
saving = True, fname_save = 'tr90_N3&REM_fp1-M2')
#%% How to save some results?
directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/'
fname = '42feats_N3'
with h5py.File((directory+fname + '.h5'), 'w') as wf:
# Accuracies
dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy'])
dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy'])
dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy'])
dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy'])
# Precision
dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision'])
dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision'])
dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision'])
dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision'])
# Recall
dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall'])
dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall'])
dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall'])
dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall'])
# f1-score
dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score'])
dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score'])
dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score'])
dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score'])
#%% Extracting features from more than one channel:
tic = time.time()
########### Central electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
fname_C_N3 = (main_path+"tr90_N3_C3-M2_C4-M1.h5")
fname_C_REM = (main_path+"tr90_REM_C3-M2_C4-M1.h5")
ch_C4 = 'C4-M1'
ch_C3 = 'C3-M2'
Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30)
X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction()
Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM')
Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30)
X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction()
Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM')
Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30)
X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction()
Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3')
Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30)
X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction()
Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3')
########### Occipital electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_O_N3 = (main_path+"tr90_N3_O1-M2_O2-M1.h5")
fname_O_REM = (main_path+"tr90_REM_O1-M2_O2-M1.h5")
ch_O2 = 'O2-M1'
ch_O1 = 'O1-M2'
Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30)
X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction()
Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM')
Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T = 30)
X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction()
Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM')
Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30)
X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction()
Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3')
Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30)
X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction()
Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3')
########### Fp electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_fp_N3 = (main_path+"tr90_N3_fp1-M2_fp2-M1.h5")
fname_fp_REM = (main_path+"tr90_REM_fp1-M2_fp2-M1.h5")
ch_fp2 = 'fp2-M1'
ch_fp1 = 'fp1-M2'
Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30)
X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction()
Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM')
Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30)
X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction()
Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM')
Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30)
X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction()
Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3')
Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30)
X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction()
Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3')
toc = time.time()
print(f'time taken: {toc - tic}')
########## Concatenate all features #########
# RIGHT hemisphere - REM
X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM))
X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM))
# RIGHT hemisphere - N3
X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3))
X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3))
# LEFT hemisphere - REM
X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM))
X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM))
# LEFT hemisphere - N3
X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3))
X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3))
# Both sides - REM
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Both sides - N3
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
# Combine SWS and REM
X_SWS_REM = np.row_stack((X_N3, X_REM))
y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))
# SAVE ALL COMBINATIONS
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
# one hemisphere
Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM')
Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM')
Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3')
Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3')
# Both hemisphere
Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM')
# Both hemispheres- SWS &REM combination
Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM')
#%% Load features from different brain regions, sleep stage and combine them
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
feats = 'featureset'
labels = 'labels'
# Pick right hemisphere N3
fname_rh_N3 = 'feat42_rh_N3'
X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)
# Pick left hemisphere N3
fname_lh_N3 = 'feat42_lh_N3'
X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels)
# Pick right hemisphere REM
fname_rh_REM = 'feat42_rh_REM'
X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels)
# Pick LEFT hemisphere REM
fname_lh_REM = 'feat42_lh_REM'
X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels)
# Combine them
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Save combination
Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path, filename = 'feat42_l&rh_REM')
| 53.209302 | 127 | 0.682984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,709 | 0.270178 |
b99d08420cae81be117acdda96af821aba38eea2 | 6,891 | py | Python | igibson/examples/behavior/behavior_demo_collection.py | suresh-guttikonda/iGibson | a69e623058180146466cd52d4bb3c00d1facdacf | [
"MIT"
] | null | null | null | igibson/examples/behavior/behavior_demo_collection.py | suresh-guttikonda/iGibson | a69e623058180146466cd52d4bb3c00d1facdacf | [
"MIT"
] | null | null | null | igibson/examples/behavior/behavior_demo_collection.py | suresh-guttikonda/iGibson | a69e623058180146466cd52d4bb3c00d1facdacf | [
"MIT"
] | null | null | null | """
Main BEHAVIOR demo collection entrypoint
"""
import argparse
import copy
import datetime
import os
import bddl
import numpy as np
import igibson
from igibson.activity.activity_base import iGBEHAVIORActivityInstance
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings
from igibson.simulator import Simulator
from igibson.utils.ig_logging import IGLogWriter
POST_TASK_STEPS = 200
PHYSICS_WARMING_STEPS = 200
def parse_args():
scene_choices = [
"Beechwood_0_int",
"Beechwood_1_int",
"Benevolence_0_int",
"Benevolence_1_int",
"Benevolence_2_int",
"Ihlen_0_int",
"Ihlen_1_int",
"Merom_0_int",
"Merom_1_int",
"Pomaria_0_int",
"Pomaria_1_int",
"Pomaria_2_int",
"Rs_int",
"Wainscott_0_int",
"Wainscott_1_int",
]
task_id_choices = [0, 1]
parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
parser.add_argument(
"--task", type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl."
)
parser.add_argument(
"--task_id",
type=int,
required=True,
choices=task_id_choices,
nargs="?",
help="BDDL integer ID, matching suffix of bddl.",
)
parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log")
parser.add_argument(
"--scene", type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes."
)
parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.")
parser.add_argument(
"--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches."
)
parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
parser.add_argument(
"--no_vr", action="store_true", help="Whether to turn off VR recording and save random actions."
)
parser.add_argument("--max_steps", type=int, default=-1, help="Maximum number of steps to record before stopping.")
return parser.parse_args()
def main():
args = parse_args()
bddl.set_backend("iGibson")
collect_demo(
args.task,
args.task_id,
args.scene,
args.vr_log_path,
args.disable_save,
args.max_steps,
args.no_vr,
args.disable_scene_cache,
args.profile,
)
def collect_demo(
task,
task_id,
scene,
vr_log_path=None,
disable_save=False,
max_steps=-1,
no_vr=False,
disable_scene_cache=False,
profile=False,
):
# HDR files for PBR rendering
hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
light_modulation_map_filename = os.path.join(
igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
)
background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
# VR rendering settings
vr_rendering_settings = MeshRendererSettings(
optimized=True,
fullscreen=False,
env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True,
enable_pbr=True,
msaa=False,
light_dimming_factor=1.0,
)
# VR system settings
mode = "headless" if no_vr else "vr"
s = Simulator(
mode=mode,
rendering_settings=vr_rendering_settings,
vr_settings=VrSettings(use_vr=True),
physics_timestep=1 / 300.0,
render_timestep=1 / 30.0,
)
igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)
scene_kwargs = None
online_sampling = True
if not disable_scene_cache:
scene_kwargs = {
"urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id),
}
online_sampling = False
igbhvr_act_inst.initialize_simulator(
simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling
)
vr_agent = igbhvr_act_inst.simulator.robots[0]
if not no_vr:
vr_cs = VrConditionSwitcher(
igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction
)
log_writer = None
if not disable_save:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if vr_log_path is None:
vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp)
log_writer = IGLogWriter(
s,
log_filepath=vr_log_path,
task=igbhvr_act_inst,
store_vr=False if no_vr else True,
vr_robot=vr_agent,
profiling_mode=profile,
filter_objects=True,
)
log_writer.set_up_data_storage()
satisfied_predicates_cached = {}
post_task_steps = copy.deepcopy(POST_TASK_STEPS)
physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)
steps = 0
while max_steps < 0 or steps < max_steps:
igbhvr_act_inst.simulator.step(print_stats=profile)
task_done, satisfied_predicates = igbhvr_act_inst.check_success()
if no_vr:
if steps < 2:
action = np.zeros((28,))
action[19] = 1
action[27] = 1
else:
action = np.random.uniform(-0.01, 0.01, size=(28,))
else:
action = igbhvr_act_inst.simulator.gen_vr_robot_action()
if steps < physics_warming_steps:
action = np.zeros_like(action)
vr_agent.update(action)
if not no_vr:
if satisfied_predicates != satisfied_predicates_cached:
vr_cs.refresh_condition(switch=False)
satisfied_predicates_cached = satisfied_predicates
if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"):
vr_cs.refresh_condition()
if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"):
vr_cs.toggle_show_state()
if log_writer and not disable_save:
log_writer.process_frame()
if task_done:
post_task_steps -= 1
if post_task_steps == 0:
break
steps += 1
if log_writer and not disable_save:
log_writer.end_log_session()
s.disconnect()
if __name__ == "__main__":
main()
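# Example invocation (illustrative only; the task and scene must exist in the local
# BDDL definitions and iGibson dataset):
#   python behavior_demo_collection.py --task putting_away_groceries --task_id 0 \
#       --scene Rs_int --no_vr --disable_save --max_steps 300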
| 31.465753 | 120 | 0.652881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,308 | 0.189813 |
b99e3b0ee335439a781ae231769595415a1dc6ec | 546 | py | Python | wagtail/wagtailadmin/menu.py | digitalmarmalade/wagtail | ac4d23172ff3f42746625630583b17d243fb9822 | [
"BSD-3-Clause"
] | 1 | 2015-11-05T18:02:04.000Z | 2015-11-05T18:02:04.000Z | wagtail/wagtailadmin/menu.py | digitalmarmalade/wagtail | ac4d23172ff3f42746625630583b17d243fb9822 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailadmin/menu.py | digitalmarmalade/wagtail | ac4d23172ff3f42746625630583b17d243fb9822 | [
"BSD-3-Clause"
] | null | null | null | from django.utils.text import slugify
from django.utils.html import format_html
class MenuItem(object):
def __init__(self, label, url, name=None, classnames='', order=1000):
self.label = label
self.url = url
self.classnames = classnames
self.name = (name or slugify(unicode(label)))
self.order = order
def render_html(self):
return format_html(
u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
self.name, self.url, self.classnames, self.label)
| 32.117647 | 79 | 0.611722 | 463 | 0.847985 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.124542 |
b99ee5dfe9849188796ff8d2b024b524adedb8d2 | 1,950 | py | Python | django_mfa/migrations/0001_initial.py | timgates42/django-mfa | 89eeb83f7da3ea24f205b40b13c7f9d33ea15b99 | [
"MIT"
] | null | null | null | django_mfa/migrations/0001_initial.py | timgates42/django-mfa | 89eeb83f7da3ea24f205b40b13c7f9d33ea15b99 | [
"MIT"
] | null | null | null | django_mfa/migrations/0001_initial.py | timgates42/django-mfa | 89eeb83f7da3ea24f205b40b13c7f9d33ea15b99 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-03-26 11:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='U2FKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_used_at', models.DateTimeField(null=True)),
('public_key', models.TextField(unique=True)),
('key_handle', models.TextField()),
('app_id', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserOTP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),
('secret_key', models.CharField(blank=True, max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserRecoveryCodes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('secret_code', models.CharField(max_length=10)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),
],
),
]
| 41.489362 | 143 | 0.598974 | 1,791 | 0.918462 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.139487 |
b99f21827c3ba7ccbcab4806c878cdacfa139e20 | 317 | py | Python | app/logger_example/main.py | khanh-nguyen-code/my-collection | 31581ef0b1dae67aafb1f4e64b9973a38cc01edf | [
"MIT"
] | null | null | null | app/logger_example/main.py | khanh-nguyen-code/my-collection | 31581ef0b1dae67aafb1f4e64b9973a38cc01edf | [
"MIT"
] | null | null | null | app/logger_example/main.py | khanh-nguyen-code/my-collection | 31581ef0b1dae67aafb1f4e64b9973a38cc01edf | [
"MIT"
] | null | null | null | from my_collection import logger
if __name__ == "__main__":
logger.now().debug("debug1")
logger.now().debug("debug2")
logger.now().info("hello1")
logger.now().info("hello2")
logger.now().with_field("key", "val").error("with field1")
logger.now().with_field("key", "val").error("with field2")
| 31.7 | 62 | 0.646688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.277603 |
b9a14f8cda479b51cbe9296c63d8ae7397078bc7 | 760 | py | Python | robotframework_iperf3/__main__.py | scathaig/robotframework-iperf3 | cfeeb3e265777403d7eb06fcfa6d69650f2a5e67 | [
"Apache-2.0"
] | null | null | null | robotframework_iperf3/__main__.py | scathaig/robotframework-iperf3 | cfeeb3e265777403d7eb06fcfa6d69650f2a5e67 | [
"Apache-2.0"
] | null | null | null | robotframework_iperf3/__main__.py | scathaig/robotframework-iperf3 | cfeeb3e265777403d7eb06fcfa6d69650f2a5e67 | [
"Apache-2.0"
] | null | null | null | import argparse
from robotremoteserver import RobotRemoteServer
from .iperf3 import Iperf3
if __name__ == '__main__':
# create commandline parser
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.prog = 'python3 -m robotframework_iperf3'
# add parser options
parser.add_argument(
"-a",
"--address",
type=str,
help="server listen address",
default='0.0.0.0')
parser.add_argument(
"-p",
"--port",
type=int,
help="server listen port",
default=8270)
args = parser.parse_args()
server = RobotRemoteServer(
Iperf3(),
host=args.address,
port=args.port
)
server.serve()
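# Example: start the remote server, then load it from a Robot Framework suite
# (address and port below are the defaults declared above; the suite syntax is only a sketch):
#
#   python3 -m robotframework_iperf3 --address 0.0.0.0 --port 8270
#
#   *** Settings ***
#   Library    Remote    http://127.0.0.1:8270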
| 21.111111 | 92 | 0.619737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.223684 |
b9a1ae11b40a499e6f6854e1a273c2ff226ef650 | 692 | py | Python | h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | df8.cbind(df9)
# A B C D A0 B0 C0 D0
# ----- ------ ------ ------ ------ ----- ----- -----
# -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86
# -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27
# 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25
# 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63
# 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52
# 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09
# 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63
# 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42
# -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45
# 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05
#
# [100 rows x 8 columns] | 43.25 | 54 | 0.460983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 663 | 0.958092 |
b9a1dbb5125acea57356714e95e66c8e3a612e30 | 1,101 | py | Python | FluentPython/dynamic_attr_and_prop/frozen_json.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | [
"MIT"
] | null | null | null | FluentPython/dynamic_attr_and_prop/frozen_json.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | [
"MIT"
] | null | null | null | FluentPython/dynamic_attr_and_prop/frozen_json.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from collections import abc
from keyword import iskeyword
class FronzenJSON:
def __init__(self, mapping):
self._data = {}
for key, value in mapping.items():
if iskeyword(key):
key += '_'
# self._data = dict(mapping)
self._data[key] = value
def __getattr__(self, name):
if hasattr(self._data, name):
return getattr(self._data, name)
else:
# return FronzenJSON.build(self._data[name])
return FronzenJSON(self._data[name])
@classmethod
def build(cls, obj):
if isinstance(obj, abc.Mapping):
return cls(obj)
        elif isinstance(obj, abc.MutableSequence):
return [cls.build(item) for item in obj]
else:
return obj
def __new__(cls, arg):
if isinstance(arg, abc.Mapping):
return super().__new__(cls)
elif isinstance(arg, abc.MutableSequence):
            return [cls(item) for item in arg]
else:
return arg
| 27.525 | 56 | 0.561308 | 990 | 0.899183 | 0 | 0 | 246 | 0.223433 | 0 | 0 | 122 | 0.110808 |
b9a20089dfb3f5c8a3472d1f3be189af236d4d44 | 4,062 | py | Python | pomdp_problems/tag/models/transition_model.py | Semanti1/pomdp_findit | b96c1c06aab4b485fa005654cf6438ff63718083 | [
"MIT"
] | null | null | null | pomdp_problems/tag/models/transition_model.py | Semanti1/pomdp_findit | b96c1c06aab4b485fa005654cf6438ff63718083 | [
"MIT"
] | null | null | null | pomdp_problems/tag/models/transition_model.py | Semanti1/pomdp_findit | b96c1c06aab4b485fa005654cf6438ff63718083 | [
"MIT"
] | null | null | null | """The Tag problem. Implemented according to the paper `Anytime Point-Based
Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_.
Transition model: the robot moves deterministically. The target's movement
depends on the robot; With Pr=0.8 the target moves away from the robot,
and with Pr=0.2, the target stays at the same place. The target never
moves closer to the robot.
"""
import copy
import pomdp_py
import pomdp_problems.util as util
import pomdp_problems.tag.constants as constants
from pomdp_problems.tag.domain.action import *
class TagTransitionModel(pomdp_py.TransitionModel):
def __init__(self,
grid_map,
target_motion_policy):
self._grid_map = grid_map
self.target_motion_policy = target_motion_policy
@classmethod
def if_move_by(cls, grid_map, position, action):
if isinstance(action, MotionAction):
dx, dy = action.motion
next_position = (position[0] + dx,
position[1] + dy)
if grid_map.valid_pose(next_position):
return next_position
return position
def probability(self, next_state, state, action, **kwargs):
# Robot motion
expected_robot_position = TagTransitionModel.if_move_by(self._grid_map,
state.robot_position,
action)
if expected_robot_position != next_state.robot_position:
return constants.EPSILON
if isinstance(action, TagAction):
if next_state.target_position == next_state.robot_position:
if next_state.target_found:
return 1.0 - constants.EPSILON
else:
return constants.EPSILON
else:
if next_state.target_found:
return constants.EPSILON
else:
return 1.0 - constants.EPSILON
# Target motion
valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)
return self.target_motion_policy.probability(next_state.target_position,
state.target_position,
state.robot_position,
valid_target_motion_actions)
def sample(self, state, action, argmax=False):
# Robot motion
next_state = copy.deepcopy(state)
next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map,
state.robot_position,
action)
# If Tag action
if isinstance(action, TagAction):
if not state.target_found:
if state.robot_position == state.target_position:
next_state.target_found = True
return next_state
# Target motion
valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)
if not argmax:
next_state.target_position = self.target_motion_policy.random(state.robot_position,
state.target_position,
valid_target_motion_actions)
else:
next_state.target_position = self.target_motion_policy.mpe(state.robot_position,
state.target_position,
valid_target_motion_actions)
return next_state
def argmax(self, state, action, **kwargs):
return self.sample(state, action, argmax=True)
| 45.640449 | 103 | 0.537912 | 3,466 | 0.853274 | 0 | 0 | 359 | 0.08838 | 0 | 0 | 494 | 0.121615 |
b9a21ff5a8c4fcb07930580d031f6847ecfaed43 | 4,731 | py | Python | packit/fedpkg.py | bocekm/packit | b5da23c0fa3f205537551b9ed212d8f77d00d705 | [
"MIT"
] | null | null | null | packit/fedpkg.py | bocekm/packit | b5da23c0fa3f205537551b9ed212d8f77d00d705 | [
"MIT"
] | null | null | null | packit/fedpkg.py | bocekm/packit | b5da23c0fa3f205537551b9ed212d8f77d00d705 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from typing import Optional
from packit.exceptions import PackitCommandFailedError
from packit.utils import commands # so we can mock utils
from packit.utils.logging import logger
class FedPKG:
"""
Part of the code is from release-bot:
https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py
"""
def __init__(
self, fas_username: str = None, directory: str = None, stage: bool = False
):
self.fas_username = fas_username
self.directory = directory
self.stage = stage
self.fedpkg_exec = "fedpkg-stage" if stage else "fedpkg"
def __repr__(self):
return (
"FedPKG("
f"fas_username='{self.fas_username}', "
f"directory='{self.directory}', "
f"stage='{self.stage}')"
)
def new_sources(self, sources="", fail=True):
if not Path(self.directory).is_dir():
raise Exception("Cannot access fedpkg repository:")
return commands.run_command_remote(
cmd=[self.fedpkg_exec, "new-sources", sources],
cwd=self.directory,
error_message="Adding new sources failed:",
fail=fail,
)
def build(
self,
scratch: bool = False,
nowait: bool = False,
koji_target: Optional[str] = None,
srpm_path: Optional[Path] = None,
):
"""
build in koji
:param scratch: scratch (temporary) build or not?
:param nowait: False == wait for the build to finish
:param koji_target: koji target to build in (`koji list-targets`)
:param srpm_path: use selected SRPM for build, not dist-git repo & ref
:return:
"""
cmd = [self.fedpkg_exec, "build"]
if scratch:
cmd.append("--scratch")
if nowait:
cmd.append("--nowait")
if koji_target:
cmd += ["--target", koji_target]
if srpm_path:
cmd += ["--srpm", str(srpm_path)]
try:
commands.run_command_remote(
cmd=cmd,
cwd=self.directory,
error_message="Submission of build to koji failed.",
fail=True,
)
except PackitCommandFailedError as ex:
# fail on the fedpkg side, the build is triggered
if (
"watch_tasks() got an unexpected keyword argument 'ki_handler'"
in ex.stderr_output
):
logger.info(
"The 'fedpkg build' command crashed which is a known issue: "
"the build is submitted in koji anyway."
)
logger.debug(ex.stdout_output)
else:
raise
def clone(self, package_name: str, target_path: str, anonymous: bool = False):
"""
clone a dist-git repo; this has to be done in current env
b/c we don't have the keytab in sandbox
"""
cmd = [self.fedpkg_exec]
if self.fas_username:
cmd += ["--user", self.fas_username]
cmd += ["-q", "clone"]
if anonymous:
cmd += ["-a"]
cmd += [package_name, target_path]
error_msg = (
f"Packit failed to clone the repository {package_name}; "
"please make sure that you are authorized to clone repositories "
"from Fedora dist-git - this may require SSH keys set up or "
"Kerberos ticket being active."
)
commands.run_command(cmd=cmd, error_message=error_msg)
| 35.044444 | 82 | 0.609808 | 3,412 | 0.721201 | 0 | 0 | 0 | 0 | 0 | 0 | 2,434 | 0.514479 |
b9a3c97262cf3c50a695832e8941374463a78067 | 901 | py | Python | tests/test_MaskedArrayCollection.py | ahaldane/NDducktype_tests | 4876416e5fbff7ba0d85445c0eeae432d6e80014 | [
"BSD-3-Clause"
] | 3 | 2020-06-18T14:18:39.000Z | 2021-07-22T18:05:52.000Z | tests/test_MaskedArrayCollection.py | ahaldane/NDducktype_tests | 4876416e5fbff7ba0d85445c0eeae432d6e80014 | [
"BSD-3-Clause"
] | 2 | 2020-07-19T15:44:09.000Z | 2020-07-28T23:22:21.000Z | tests/test_MaskedArrayCollection.py | ahaldane/NDducktype_tests | 4876416e5fbff7ba0d85445c0eeae432d6e80014 | [
"BSD-3-Clause"
] | 2 | 2019-06-20T00:20:13.000Z | 2020-09-20T21:42:52.000Z | #!/usr/bin/env python
from ndarray_ducktypes.ArrayCollection import ArrayCollection
from ndarray_ducktypes.MaskedArray import MaskedArray
from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection
import numpy as np
# Tests for Masked ArrayCollections.
#
# First try: Simply make an arraycollection of MaskedArrays. Downside: this
# strategy does not give a "filled" method. Probably to get a masked
# ArrayCollection we should really subclass ArrayCollection to have a
# fill_value and a filled() method
#a = MaskedArray(np.arange(10), np.arange(10)%3)
#b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2)
#c = ArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))
## second try: Subclass of ArrayCollection
#c = MaskedArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))
#print(repr(c.filled()))
| 31.068966 | 75 | 0.738069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.741398 |
b9a4cbf5401cd86949e3f94c13bc464c4725fcee | 192,704 | py | Python | rpc/gen/core_pb2.py | jasonjoo2010/core | 7c05ddbdac2e05a3d96db28f8bdfacf661907b82 | [
"MIT"
] | null | null | null | rpc/gen/core_pb2.py | jasonjoo2010/core | 7c05ddbdac2e05a3d96db28f8bdfacf661907b82 | [
"MIT"
] | null | null | null | rpc/gen/core_pb2.py | jasonjoo2010/core | 7c05ddbdac2e05a3d96db28f8bdfacf661907b82 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: core.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='core.proto',
package='pb',
syntax='proto3',
serialized_pb=_b('\n\ncore.proto\x12\x02pb\"\x07\n\x05\x45mpty\"\xb4\x01\n\x15ListContainersOptions\x12\x0f\n\x07\x61ppname\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x35\n\x06labels\x18\x04 \x03(\x0b\x32%.pb.ListContainersOptions.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"L\n\x13\x44\x65ployStatusOptions\x12\x0f\n\x07\x61ppname\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\"v\n\x13\x44\x65ployStatusMessage\x12\x0e\n\x06\x61\x63tion\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppname\x18\x02 \x01(\t\x12\x12\n\nentrypoint\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\n\n\x02id\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"0\n\x03Pod\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x02 \x01(\t\x12\r\n\x05\x66\x61vor\x18\x03 \x01(\t\"\x1d\n\x04Pods\x12\x15\n\x04pods\x18\x01 \x03(\x0b\x32\x07.pb.Pod\"\xfc\x02\n\x0bPodResource\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x03\x63pu\x18\x02 \x03(\x0b\x32\x18.pb.PodResource.CpuEntry\x12+\n\x06memory\x18\x03 \x03(\x0b\x32\x1b.pb.PodResource.MemoryEntry\x12\'\n\x04\x64iff\x18\x04 \x03(\x0b\x32\x19.pb.PodResource.DiffEntry\x12+\n\x06\x64\x65tail\x18\x05 \x03(\x0b\x32\x1b.pb.PodResource.DetailEntry\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a-\n\x0bMemoryEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a+\n\tDiffEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x1a-\n\x0b\x44\x65tailEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"5\n\x12ListNetworkOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x0e\n\x06\x64river\x18\x02 \x01(\t\"(\n\x07Network\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07subnets\x18\x02 \x03(\t\")\n\x08Networks\x12\x1d\n\x08networks\x18\x01 \x03(\x0b\x32\x0b.pb.Network\"\x9e\x03\n\x04Node\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\x1e\n\x03\x63pu\x18\x04 \x03(\x0b\x32\x11.pb.Node.CpuEntry\x12\x10\n\x08\x63pu_used\x18\x05 \x01(\x01\x12\x0e\n\x06memory\x18\x06 \x01(\x03\x12\x13\n\x0bmemory_used\x18\x07 \x01(\x03\x12\x11\n\tavailable\x18\x08 \x01(\x08\x12$\n\x06labels\x18\t \x03(\x0b\x32\x14.pb.Node.LabelsEntry\x12\x13\n\x0binit_memory\x18\n \x01(\x03\x12\'\n\x08init_cpu\x18\x0b \x03(\x0b\x32\x15.pb.Node.InitCpuEntry\x12\x0c\n\x04info\x18\x0c \x01(\t\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cInitCpuEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\" \n\x05Nodes\x12\x17\n\x05nodes\x18\x01 \x03(\x0b\x32\x08.pb.Node\"E\n\rNodeAvailable\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\x12\x11\n\tavailable\x18\x03 \x01(\x08\"\xb8\x03\n\tContainer\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12#\n\x03\x63pu\x18\x05 \x03(\x0b\x32\x16.pb.Container.CpuEntry\x12\r\n\x05quota\x18\x06 \x01(\x01\x12\x0e\n\x06memory\x18\x07 \x01(\x03\x12\x12\n\nprivileged\x18\x08 \x01(\x08\x12)\n\x06labels\x18\t \x03(\x0b\x32\x19.pb.Container.LabelsEntry\x12+\n\x07publish\x18\n 
\x03(\x0b\x32\x1a.pb.Container.PublishEntry\x12\r\n\x05image\x18\x0b \x01(\t\x12\x0f\n\x07inspect\x18\x0c \x01(\x0c\x12\x13\n\x0bstatus_data\x18\r \x01(\x0c\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cPublishEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"k\n\x18\x43ontainerDeployedOptions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppname\x18\x02 \x01(\t\x12\x12\n\nentrypoint\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\"/\n\nContainers\x12!\n\ncontainers\x18\x01 \x03(\x0b\x32\r.pb.Container\"\x19\n\x0b\x43ontainerID\x12\n\n\x02id\x18\x01 \x01(\t\"\x1b\n\x0c\x43ontainerIDs\x12\x0b\n\x03ids\x18\x01 \x03(\t\"4\n\x16RemoveContainerOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\"7\n\x0eReallocOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\x0b\n\x03\x63pu\x18\x02 \x01(\x01\x12\x0b\n\x03mem\x18\x03 \x01(\x03\":\n\rAddPodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x66\x61vor\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x03 \x01(\t\" \n\x10RemovePodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\rGetPodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xf7\x01\n\x0e\x41\x64\x64NodeOptions\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\n\n\x02\x63\x61\x18\x04 \x01(\t\x12\x0c\n\x04\x63\x65rt\x18\x05 \x01(\t\x12\x0b\n\x03key\x18\x06 \x01(\t\x12\x0b\n\x03\x63pu\x18\x07 \x01(\x05\x12\r\n\x05share\x18\x08 \x01(\x05\x12\x0e\n\x06memory\x18\t \x01(\x03\x12.\n\x06labels\x18\n \x03(\x0b\x32\x1e.pb.AddNodeOptions.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"6\n\x11RemoveNodeOptions\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\"3\n\x0eGetNodeOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\"0\n\x10ListNodesOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x0b\n\x03\x61ll\x18\x02 \x01(\x08\"\x8e\x04\n\x05\x42uild\x12\x0c\n\x04\x62\x61se\x18\x01 \x01(\t\x12\x0c\n\x04repo\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x0b\n\x03\x64ir\x18\x04 \x01(\t\x12\x11\n\tsubmodule\x18\x05 \x01(\x08\x12\x10\n\x08\x63ommands\x18\x06 \x03(\t\x12!\n\x04\x65nvs\x18\x07 \x03(\x0b\x32\x13.pb.Build.EnvsEntry\x12!\n\x04\x61rgs\x18\x08 \x03(\x0b\x32\x13.pb.Build.ArgsEntry\x12%\n\x06labels\x18\t \x03(\x0b\x32\x15.pb.Build.LabelsEntry\x12+\n\tartifacts\x18\n \x03(\x0b\x32\x18.pb.Build.ArtifactsEntry\x12#\n\x05\x63\x61\x63he\x18\x0b \x03(\x0b\x32\x14.pb.Build.CacheEntry\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a,\n\nCacheEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"z\n\x06\x42uilds\x12\x0e\n\x06stages\x18\x01 \x03(\t\x12&\n\x06\x62uilds\x18\x02 \x03(\x0b\x32\x16.pb.Builds.BuildsEntry\x1a\x38\n\x0b\x42uildsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 
\x01(\x0b\x32\t.pb.Build:\x02\x38\x01\"s\n\x11\x42uildImageOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x0b\n\x03uid\x18\x03 \x01(\x05\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12\x1a\n\x06\x62uilds\x18\x05 \x01(\x0b\x32\n.pb.Builds\x12\x0b\n\x03tar\x18\x06 \x01(\x0c\"F\n\x0bHookOptions\x12\x13\n\x0b\x61\x66ter_start\x18\x01 \x03(\t\x12\x13\n\x0b\x62\x65\x66ore_stop\x18\x02 \x03(\t\x12\r\n\x05\x66orce\x18\x03 \x01(\x08\"U\n\x12HealthCheckOptions\x12\x11\n\ttcp_ports\x18\x01 \x03(\t\x12\x11\n\thttp_port\x18\x02 \x01(\t\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\x05\"u\n\nLogOptions\x12\x0c\n\x04type\x18\x01 \x01(\t\x12*\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x1a.pb.LogOptions.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xca\x02\n\x11\x45ntrypointOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x12\n\nprivileged\x18\x03 \x01(\x08\x12\x0b\n\x03\x64ir\x18\x04 \x01(\t\x12\x1b\n\x03log\x18\x05 \x01(\x0b\x32\x0e.pb.LogOptions\x12\x0f\n\x07publish\x18\x06 \x03(\t\x12+\n\x0bhealthcheck\x18\x07 \x01(\x0b\x32\x16.pb.HealthCheckOptions\x12\x1d\n\x04hook\x18\x08 \x01(\x0b\x32\x0f.pb.HookOptions\x12\x16\n\x0erestart_policy\x18\t \x01(\t\x12\x33\n\x07sysctls\x18\n \x03(\x0b\x32\".pb.EntrypointOptions.SysctlsEntry\x1a.\n\x0cSysctlsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x88\x06\n\rDeployOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12)\n\nentrypoint\x18\x02 \x01(\x0b\x32\x15.pb.EntrypointOptions\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\r\n\x05image\x18\x05 \x01(\t\x12\x12\n\nextra_args\x18\x06 \x01(\t\x12\x11\n\tcpu_quota\x18\x07 \x01(\x01\x12\x0e\n\x06memory\x18\x08 \x01(\x03\x12\r\n\x05\x63ount\x18\t \x01(\x05\x12\x0b\n\x03\x65nv\x18\n \x03(\t\x12\x0b\n\x03\x64ns\x18\x0b \x03(\t\x12\x13\n\x0b\x65xtra_hosts\x18\x0c \x03(\t\x12\x0f\n\x07volumes\x18\r \x03(\t\x12\x31\n\x08networks\x18\x0e \x03(\x0b\x32\x1f.pb.DeployOptions.NetworksEntry\x12\x13\n\x0bnetworkmode\x18\x0f \x01(\t\x12\x0c\n\x04user\x18\x10 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x11 \x01(\x08\x12\x11\n\topenStdin\x18\x12 \x01(\x08\x12-\n\x06labels\x18\x13 \x03(\x0b\x32\x1d.pb.DeployOptions.LabelsEntry\x12\x35\n\nnodelabels\x18\x14 \x03(\x0b\x32!.pb.DeployOptions.NodelabelsEntry\x12\x15\n\rdeploy_method\x18\x15 \x01(\t\x12)\n\x04\x64\x61ta\x18\x16 \x03(\x0b\x32\x1b.pb.DeployOptions.DataEntry\x12\x11\n\tsoftlimit\x18\x17 \x01(\x08\x12\x13\n\x0bnodes_limit\x18\x18 \x01(\x05\x1a/\n\rNetworksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fNodelabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"\xb5\x02\n\x0eReplaceOptions\x12$\n\tdeployOpt\x18\x01 \x01(\x0b\x32\x11.pb.DeployOptions\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12;\n\rfilter_labels\x18\x03 \x03(\x0b\x32$.pb.ReplaceOptions.FilterLabelsEntry\x12*\n\x04\x63opy\x18\x04 \x03(\x0b\x32\x1c.pb.ReplaceOptions.CopyEntry\x12\x0b\n\x03ids\x18\x05 \x03(\t\x12\x16\n\x0enetworkinherit\x18\x06 \x01(\x08\x1a\x33\n\x11\x46ilterLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tCopyEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"T\n\x11\x43\x61\x63heImageOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\"d\n\x12RemoveImageOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\x12\r\n\x05prune\x18\x05 \x01(\x08\"\x1a\n\tCopyPaths\x12\r\n\x05paths\x18\x01 \x03(\t\"{\n\x0b\x43opyOptions\x12-\n\x07targets\x18\x01 \x03(\x0b\x32\x1c.pb.CopyOptions.TargetsEntry\x1a=\n\x0cTargetsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.pb.CopyPaths:\x02\x38\x01\",\n\x0b\x45rrorDetail\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x87\x01\n\x11\x42uildImageMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x10\n\x08progress\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\x12\x0e\n\x06stream\x18\x05 \x01(\t\x12%\n\x0c\x65rror_detail\x18\x06 \x01(\x0b\x32\x0f.pb.ErrorDetail\"\xea\x02\n\x16\x43reateContainerMessage\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0f\n\x07success\x18\x06 \x01(\x08\x12\x30\n\x03\x63pu\x18\x07 \x03(\x0b\x32#.pb.CreateContainerMessage.CpuEntry\x12\r\n\x05quota\x18\x08 \x01(\x01\x12\x0e\n\x06memory\x18\t \x01(\x03\x12\x38\n\x07publish\x18\n \x03(\x0b\x32\'.pb.CreateContainerMessage.PublishEntry\x12\x0c\n\x04hook\x18\x0b \x01(\x0c\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a.\n\x0cPublishEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x80\x01\n\x17ReplaceContainerMessage\x12*\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x1a.pb.CreateContainerMessage\x12*\n\x06remove\x18\x02 \x01(\x0b\x32\x1a.pb.RemoveContainerMessage\x12\r\n\x05\x65rror\x18\x03 \x01(\t\"7\n\x11RunAndWaitMessage\x12\x14\n\x0c\x63ontainer_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"V\n\x11\x43\x61\x63heImageMessage\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"F\n\x12RemoveImageMessage\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08messages\x18\x03 \x03(\t\"C\n\x16RemoveContainerMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0c\n\x04hook\x18\x03 \x01(\t\"5\n\x16ReallocResourceMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\"b\n\x0b\x43opyMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0c\n\x04path\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"J\n\x11RunAndWaitOptions\x12(\n\rDeployOptions\x18\x01 \x01(\x0b\x32\x11.pb.DeployOptions\x12\x0b\n\x03\x43md\x18\x02 \x01(\x0c\"4\n\x17\x43ontrolContainerOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"B\n\x17\x43ontrolContainerMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0c\n\x04hook\x18\x03 
\x01(\x0c\x32\xcb\x0c\n\x07\x43oreRPC\x12!\n\x08ListPods\x12\t.pb.Empty\x1a\x08.pb.Pods\"\x00\x12&\n\x06\x41\x64\x64Pod\x12\x11.pb.AddPodOptions\x1a\x07.pb.Pod\"\x00\x12.\n\tRemovePod\x12\x14.pb.RemovePodOptions\x1a\t.pb.Empty\"\x00\x12&\n\x06GetPod\x12\x11.pb.GetPodOptions\x1a\x07.pb.Pod\"\x00\x12\x36\n\x0eGetPodResource\x12\x11.pb.GetPodOptions\x1a\x0f.pb.PodResource\"\x00\x12)\n\x07\x41\x64\x64Node\x12\x12.pb.AddNodeOptions\x1a\x08.pb.Node\"\x00\x12.\n\nRemoveNode\x12\x15.pb.RemoveNodeOptions\x1a\x07.pb.Pod\"\x00\x12\x31\n\x10SetNodeAvailable\x12\x11.pb.NodeAvailable\x1a\x08.pb.Node\"\x00\x12)\n\x07GetNode\x12\x12.pb.GetNodeOptions\x1a\x08.pb.Node\"\x00\x12\x30\n\x0cGetContainer\x12\x0f.pb.ContainerID\x1a\r.pb.Container\"\x00\x12\x33\n\rGetContainers\x12\x10.pb.ContainerIDs\x1a\x0e.pb.Containers\"\x00\x12/\n\rGetNodeByName\x12\x12.pb.GetNodeOptions\x1a\x08.pb.Node\"\x00\x12\x31\n\x0cListPodNodes\x12\x14.pb.ListNodesOptions\x1a\t.pb.Nodes\"\x00\x12\x36\n\x0cListNetworks\x12\x16.pb.ListNetworkOptions\x1a\x0c.pb.Networks\"\x00\x12=\n\x0eListContainers\x12\x19.pb.ListContainersOptions\x1a\x0e.pb.Containers\"\x00\x12:\n\x12ListNodeContainers\x12\x12.pb.GetNodeOptions\x1a\x0e.pb.Containers\"\x00\x12>\n\x11\x43ontainerDeployed\x12\x1c.pb.ContainerDeployedOptions\x1a\t.pb.Empty\"\x00\x12,\n\x04\x43opy\x12\x0f.pb.CopyOptions\x1a\x0f.pb.CopyMessage\"\x00\x30\x01\x12>\n\nBuildImage\x12\x15.pb.BuildImageOptions\x1a\x15.pb.BuildImageMessage\"\x00\x30\x01\x12>\n\nCacheImage\x12\x15.pb.CacheImageOptions\x1a\x15.pb.CacheImageMessage\"\x00\x30\x01\x12\x41\n\x0bRemoveImage\x12\x16.pb.RemoveImageOptions\x1a\x16.pb.RemoveImageMessage\"\x00\x30\x01\x12\x44\n\x0c\x44\x65ployStatus\x12\x17.pb.DeployStatusOptions\x1a\x17.pb.DeployStatusMessage\"\x00\x30\x01\x12@\n\nRunAndWait\x12\x15.pb.RunAndWaitOptions\x1a\x15.pb.RunAndWaitMessage\"\x00(\x01\x30\x01\x12\x44\n\x0f\x43reateContainer\x12\x11.pb.DeployOptions\x1a\x1a.pb.CreateContainerMessage\"\x00\x30\x01\x12G\n\x10ReplaceContainer\x12\x12.pb.ReplaceOptions\x1a\x1b.pb.ReplaceContainerMessage\"\x00\x30\x01\x12M\n\x0fRemoveContainer\x12\x1a.pb.RemoveContainerOptions\x1a\x1a.pb.RemoveContainerMessage\"\x00\x30\x01\x12P\n\x10\x43ontrolContainer\x12\x1b.pb.ControlContainerOptions\x1a\x1b.pb.ControlContainerMessage\"\x00\x30\x01\x12\x45\n\x0fReallocResource\x12\x12.pb.ReallocOptions\x1a\x1a.pb.ReallocResourceMessage\"\x00\x30\x01\x62\x06proto3')
)
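# The escaped byte string above is the serialized file descriptor for package
# 'pb': it registers every message plus the CoreRPC service with the protobuf
# runtime and is not meant to be edited by hand. The Descriptor objects below
# restate the same messages field by field so the generated message classes
# can be wired up later in this module.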
_EMPTY = _descriptor.Descriptor(
name='Empty',
full_name='pb.Empty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=18,
serialized_end=25,
)
_LISTCONTAINERSOPTIONS_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pb.ListContainersOptions.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.ListContainersOptions.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.ListContainersOptions.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=208,
)
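# Nested descriptors named *Entry that carry MessageOptions '8\001'
# (map_entry=true) are the synthetic key/value messages protoc emits for
# proto3 map<...> fields; LabelsEntry above backs
# ListContainersOptions.labels (map<string, string>).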
_LISTCONTAINERSOPTIONS = _descriptor.Descriptor(
name='ListContainersOptions',
full_name='pb.ListContainersOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='appname', full_name='pb.ListContainersOptions.appname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entrypoint', full_name='pb.ListContainersOptions.entrypoint', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.ListContainersOptions.nodename', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pb.ListContainersOptions.labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTCONTAINERSOPTIONS_LABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=208,
)
_DEPLOYSTATUSOPTIONS = _descriptor.Descriptor(
name='DeployStatusOptions',
full_name='pb.DeployStatusOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='appname', full_name='pb.DeployStatusOptions.appname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entrypoint', full_name='pb.DeployStatusOptions.entrypoint', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.DeployStatusOptions.nodename', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=210,
serialized_end=286,
)
_DEPLOYSTATUSMESSAGE = _descriptor.Descriptor(
name='DeployStatusMessage',
full_name='pb.DeployStatusMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='action', full_name='pb.DeployStatusMessage.action', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appname', full_name='pb.DeployStatusMessage.appname', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entrypoint', full_name='pb.DeployStatusMessage.entrypoint', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.DeployStatusMessage.nodename', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='pb.DeployStatusMessage.id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pb.DeployStatusMessage.data', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=288,
serialized_end=406,
)
_POD = _descriptor.Descriptor(
name='Pod',
full_name='pb.Pod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.Pod.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='pb.Pod.desc', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='favor', full_name='pb.Pod.favor', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=408,
serialized_end=456,
)
_PODS = _descriptor.Descriptor(
name='Pods',
full_name='pb.Pods',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pods', full_name='pb.Pods.pods', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=458,
serialized_end=487,
)
_PODRESOURCE_CPUENTRY = _descriptor.Descriptor(
name='CpuEntry',
full_name='pb.PodResource.CpuEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.PodResource.CpuEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.PodResource.CpuEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=689,
serialized_end=731,
)
_PODRESOURCE_MEMORYENTRY = _descriptor.Descriptor(
name='MemoryEntry',
full_name='pb.PodResource.MemoryEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.PodResource.MemoryEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.PodResource.MemoryEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=733,
serialized_end=778,
)
_PODRESOURCE_DIFFENTRY = _descriptor.Descriptor(
name='DiffEntry',
full_name='pb.PodResource.DiffEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.PodResource.DiffEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.PodResource.DiffEntry.value', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=780,
serialized_end=823,
)
_PODRESOURCE_DETAILENTRY = _descriptor.Descriptor(
name='DetailEntry',
full_name='pb.PodResource.DetailEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.PodResource.DetailEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.PodResource.DetailEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=825,
serialized_end=870,
)
_PODRESOURCE = _descriptor.Descriptor(
name='PodResource',
full_name='pb.PodResource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.PodResource.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu', full_name='pb.PodResource.cpu', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pb.PodResource.memory', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='diff', full_name='pb.PodResource.diff', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detail', full_name='pb.PodResource.detail', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PODRESOURCE_CPUENTRY, _PODRESOURCE_MEMORYENTRY, _PODRESOURCE_DIFFENTRY, _PODRESOURCE_DETAILENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=490,
serialized_end=870,
)
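# PodResource aggregates resource figures keyed by string (presumably node
# names): per the four *Entry descriptors above, 'cpu' and 'memory' map to
# doubles, 'diff' to bools, and 'detail' to strings.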
_LISTNETWORKOPTIONS = _descriptor.Descriptor(
name='ListNetworkOptions',
full_name='pb.ListNetworkOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='podname', full_name='pb.ListNetworkOptions.podname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='driver', full_name='pb.ListNetworkOptions.driver', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=872,
serialized_end=925,
)
_NETWORK = _descriptor.Descriptor(
name='Network',
full_name='pb.Network',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.Network.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subnets', full_name='pb.Network.subnets', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=927,
serialized_end=967,
)
_NETWORKS = _descriptor.Descriptor(
name='Networks',
full_name='pb.Networks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='networks', full_name='pb.Networks.networks', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=969,
serialized_end=1010,
)
_NODE_CPUENTRY = _descriptor.Descriptor(
name='CpuEntry',
full_name='pb.Node.CpuEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Node.CpuEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Node.CpuEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1290,
serialized_end=1332,
)
_NODE_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pb.Node.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Node.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Node.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=208,
)
_NODE_INITCPUENTRY = _descriptor.Descriptor(
name='InitCpuEntry',
full_name='pb.Node.InitCpuEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Node.InitCpuEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Node.InitCpuEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1381,
serialized_end=1427,
)
_NODE = _descriptor.Descriptor(
name='Node',
full_name='pb.Node',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.Node.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endpoint', full_name='pb.Node.endpoint', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podname', full_name='pb.Node.podname', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu', full_name='pb.Node.cpu', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu_used', full_name='pb.Node.cpu_used', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pb.Node.memory', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory_used', full_name='pb.Node.memory_used', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='available', full_name='pb.Node.available', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pb.Node.labels', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='init_memory', full_name='pb.Node.init_memory', index=9,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='init_cpu', full_name='pb.Node.init_cpu', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='info', full_name='pb.Node.info', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_NODE_CPUENTRY, _NODE_LABELSENTRY, _NODE_INITCPUENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1013,
serialized_end=1427,
)
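# Node keeps both current and initial capacity: 'cpu' and 'init_cpu' are
# map<string, int32>, 'memory' and 'init_memory' are int64, 'cpu_used' is a
# double, and 'labels' is a map<string, string>.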
_NODES = _descriptor.Descriptor(
name='Nodes',
full_name='pb.Nodes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodes', full_name='pb.Nodes.nodes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1429,
serialized_end=1461,
)
_NODEAVAILABLE = _descriptor.Descriptor(
name='NodeAvailable',
full_name='pb.NodeAvailable',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.NodeAvailable.nodename', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podname', full_name='pb.NodeAvailable.podname', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='available', full_name='pb.NodeAvailable.available', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1463,
serialized_end=1532,
)
_CONTAINER_CPUENTRY = _descriptor.Descriptor(
name='CpuEntry',
full_name='pb.Container.CpuEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Container.CpuEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Container.CpuEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1290,
serialized_end=1332,
)
_CONTAINER_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pb.Container.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Container.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Container.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=208,
)
_CONTAINER_PUBLISHENTRY = _descriptor.Descriptor(
name='PublishEntry',
full_name='pb.Container.PublishEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Container.PublishEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Container.PublishEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1929,
serialized_end=1975,
)
_CONTAINER = _descriptor.Descriptor(
name='Container',
full_name='pb.Container',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.Container.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podname', full_name='pb.Container.podname', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.Container.nodename', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='pb.Container.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu', full_name='pb.Container.cpu', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quota', full_name='pb.Container.quota', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pb.Container.memory', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='privileged', full_name='pb.Container.privileged', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pb.Container.labels', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='publish', full_name='pb.Container.publish', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='pb.Container.image', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inspect', full_name='pb.Container.inspect', index=11,
number=12, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_data', full_name='pb.Container.status_data', index=12,
number=13, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CONTAINER_CPUENTRY, _CONTAINER_LABELSENTRY, _CONTAINER_PUBLISHENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1535,
serialized_end=1975,
)
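# The Container descriptor above backs the Container class that the protobuf
# runtime builds further down in this module. A minimal usage sketch, kept as
# comments only and assuming the module is imported as core_pb2 (the actual
# module name depends on the .proto file name):
#
#     import core_pb2 as pb2                        # hypothetical module name
#     c = pb2.Container(id='abc123', podname='dev')  # scalar fields by keyword
#     c.labels['team'] = 'platform'                  # map<string, string> field
#     wire_bytes = c.SerializeToString()             # standard protobuf message API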
_CONTAINERDEPLOYEDOPTIONS = _descriptor.Descriptor(
name='ContainerDeployedOptions',
full_name='pb.ContainerDeployedOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.ContainerDeployedOptions.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appname', full_name='pb.ContainerDeployedOptions.appname', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entrypoint', full_name='pb.ContainerDeployedOptions.entrypoint', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.ContainerDeployedOptions.nodename', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pb.ContainerDeployedOptions.data', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1977,
serialized_end=2084,
)
_CONTAINERS = _descriptor.Descriptor(
name='Containers',
full_name='pb.Containers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='containers', full_name='pb.Containers.containers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2086,
serialized_end=2133,
)
_CONTAINERID = _descriptor.Descriptor(
name='ContainerID',
full_name='pb.ContainerID',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.ContainerID.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2135,
serialized_end=2160,
)
_CONTAINERIDS = _descriptor.Descriptor(
name='ContainerIDs',
full_name='pb.ContainerIDs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='pb.ContainerIDs.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2162,
serialized_end=2189,
)
_REMOVECONTAINEROPTIONS = _descriptor.Descriptor(
name='RemoveContainerOptions',
full_name='pb.RemoveContainerOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='pb.RemoveContainerOptions.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force', full_name='pb.RemoveContainerOptions.force', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2191,
serialized_end=2243,
)
_REALLOCOPTIONS = _descriptor.Descriptor(
name='ReallocOptions',
full_name='pb.ReallocOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='pb.ReallocOptions.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu', full_name='pb.ReallocOptions.cpu', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mem', full_name='pb.ReallocOptions.mem', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2245,
serialized_end=2300,
)
_ADDPODOPTIONS = _descriptor.Descriptor(
name='AddPodOptions',
full_name='pb.AddPodOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.AddPodOptions.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='favor', full_name='pb.AddPodOptions.favor', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='pb.AddPodOptions.desc', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2302,
serialized_end=2360,
)
_REMOVEPODOPTIONS = _descriptor.Descriptor(
name='RemovePodOptions',
full_name='pb.RemovePodOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.RemovePodOptions.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2362,
serialized_end=2394,
)
_GETPODOPTIONS = _descriptor.Descriptor(
name='GetPodOptions',
full_name='pb.GetPodOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.GetPodOptions.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2396,
serialized_end=2425,
)
_ADDNODEOPTIONS_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pb.AddNodeOptions.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.AddNodeOptions.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.AddNodeOptions.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=208,
)
_ADDNODEOPTIONS = _descriptor.Descriptor(
name='AddNodeOptions',
full_name='pb.AddNodeOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.AddNodeOptions.nodename', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endpoint', full_name='pb.AddNodeOptions.endpoint', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podname', full_name='pb.AddNodeOptions.podname', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ca', full_name='pb.AddNodeOptions.ca', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cert', full_name='pb.AddNodeOptions.cert', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='pb.AddNodeOptions.key', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu', full_name='pb.AddNodeOptions.cpu', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='share', full_name='pb.AddNodeOptions.share', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pb.AddNodeOptions.memory', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pb.AddNodeOptions.labels', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ADDNODEOPTIONS_LABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2428,
serialized_end=2675,
)
_REMOVENODEOPTIONS = _descriptor.Descriptor(
name='RemoveNodeOptions',
full_name='pb.RemoveNodeOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.RemoveNodeOptions.nodename', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podname', full_name='pb.RemoveNodeOptions.podname', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2677,
serialized_end=2731,
)
_GETNODEOPTIONS = _descriptor.Descriptor(
name='GetNodeOptions',
full_name='pb.GetNodeOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='podname', full_name='pb.GetNodeOptions.podname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.GetNodeOptions.nodename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2733,
serialized_end=2784,
)
_LISTNODESOPTIONS = _descriptor.Descriptor(
name='ListNodesOptions',
full_name='pb.ListNodesOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='podname', full_name='pb.ListNodesOptions.podname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all', full_name='pb.ListNodesOptions.all', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2786,
serialized_end=2834,
)
_BUILD_ENVSENTRY = _descriptor.Descriptor(
name='EnvsEntry',
full_name='pb.Build.EnvsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Build.EnvsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Build.EnvsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3132,
serialized_end=3175,
)
_BUILD_ARGSENTRY = _descriptor.Descriptor(
name='ArgsEntry',
full_name='pb.Build.ArgsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Build.ArgsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Build.ArgsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3177,
serialized_end=3220,
)
_BUILD_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pb.Build.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Build.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Build.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=208,
)
_BUILD_ARTIFACTSENTRY = _descriptor.Descriptor(
name='ArtifactsEntry',
full_name='pb.Build.ArtifactsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Build.ArtifactsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Build.ArtifactsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3269,
serialized_end=3317,
)
_BUILD_CACHEENTRY = _descriptor.Descriptor(
name='CacheEntry',
full_name='pb.Build.CacheEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Build.CacheEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Build.CacheEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3319,
serialized_end=3363,
)
_BUILD = _descriptor.Descriptor(
name='Build',
full_name='pb.Build',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base', full_name='pb.Build.base', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repo', full_name='pb.Build.repo', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='pb.Build.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dir', full_name='pb.Build.dir', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='submodule', full_name='pb.Build.submodule', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='commands', full_name='pb.Build.commands', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='envs', full_name='pb.Build.envs', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='args', full_name='pb.Build.args', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pb.Build.labels', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artifacts', full_name='pb.Build.artifacts', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cache', full_name='pb.Build.cache', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BUILD_ENVSENTRY, _BUILD_ARGSENTRY, _BUILD_LABELSENTRY, _BUILD_ARTIFACTSENTRY, _BUILD_CACHEENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2837,
serialized_end=3363,
)
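# pb.Build describes a single build stage: base image, repo, version, working dir,
# a submodule flag, repeated commands, and five map<string, string> fields (envs,
# args, labels, artifacts, cache). Each map field is generated as a nested *Entry
# message whose MessageOptions blob _b('8\001') sets map_entry=true.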
_BUILDS_BUILDSENTRY = _descriptor.Descriptor(
name='BuildsEntry',
full_name='pb.Builds.BuildsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.Builds.BuildsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.Builds.BuildsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3431,
serialized_end=3487,
)
_BUILDS = _descriptor.Descriptor(
name='Builds',
full_name='pb.Builds',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stages', full_name='pb.Builds.stages', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='builds', full_name='pb.Builds.builds', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BUILDS_BUILDSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3365,
serialized_end=3487,
)
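# pb.Builds groups stages: `stages` is a repeated string (presumably the stage order)
# and `builds` is a map of string keys to message values (the BuildsEntry value is
# wired to pb.Build when the descriptors are resolved later in this module).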
_BUILDIMAGEOPTIONS = _descriptor.Descriptor(
name='BuildImageOptions',
full_name='pb.BuildImageOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.BuildImageOptions.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='pb.BuildImageOptions.user', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uid', full_name='pb.BuildImageOptions.uid', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='pb.BuildImageOptions.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='builds', full_name='pb.BuildImageOptions.builds', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tar', full_name='pb.BuildImageOptions.tar', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3489,
serialized_end=3604,
)
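# pb.BuildImageOptions: name and user strings, an int32 uid, repeated tags, a
# message-typed `builds` field and a raw `tar` bytes payload.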
_HOOKOPTIONS = _descriptor.Descriptor(
name='HookOptions',
full_name='pb.HookOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='after_start', full_name='pb.HookOptions.after_start', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='before_stop', full_name='pb.HookOptions.before_stop', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force', full_name='pb.HookOptions.force', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3606,
serialized_end=3676,
)
_HEALTHCHECKOPTIONS = _descriptor.Descriptor(
name='HealthCheckOptions',
full_name='pb.HealthCheckOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tcp_ports', full_name='pb.HealthCheckOptions.tcp_ports', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='http_port', full_name='pb.HealthCheckOptions.http_port', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='pb.HealthCheckOptions.url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='pb.HealthCheckOptions.code', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3678,
serialized_end=3763,
)
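# pb.HookOptions holds repeated after_start / before_stop command lists plus a force
# flag; pb.HealthCheckOptions configures checks via tcp_ports, http_port, url and an
# int32 code.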
_LOGOPTIONS_CONFIGENTRY = _descriptor.Descriptor(
name='ConfigEntry',
full_name='pb.LogOptions.ConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.LogOptions.ConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.LogOptions.ConfigEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3837,
serialized_end=3882,
)
_LOGOPTIONS = _descriptor.Descriptor(
name='LogOptions',
full_name='pb.LogOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='pb.LogOptions.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='pb.LogOptions.config', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LOGOPTIONS_CONFIGENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3765,
serialized_end=3882,
)
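# pb.LogOptions: a `type` string (likely the log driver name) and a map<string, string>
# `config`, with ConfigEntry as the generated map-entry type.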
_ENTRYPOINTOPTIONS_SYSCTLSENTRY = _descriptor.Descriptor(
name='SysctlsEntry',
full_name='pb.EntrypointOptions.SysctlsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.EntrypointOptions.SysctlsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.EntrypointOptions.SysctlsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4169,
serialized_end=4215,
)
_ENTRYPOINTOPTIONS = _descriptor.Descriptor(
name='EntrypointOptions',
full_name='pb.EntrypointOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.EntrypointOptions.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command', full_name='pb.EntrypointOptions.command', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='privileged', full_name='pb.EntrypointOptions.privileged', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dir', full_name='pb.EntrypointOptions.dir', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='log', full_name='pb.EntrypointOptions.log', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='publish', full_name='pb.EntrypointOptions.publish', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='healthcheck', full_name='pb.EntrypointOptions.healthcheck', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hook', full_name='pb.EntrypointOptions.hook', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restart_policy', full_name='pb.EntrypointOptions.restart_policy', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sysctls', full_name='pb.EntrypointOptions.sysctls', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ENTRYPOINTOPTIONS_SYSCTLSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3885,
serialized_end=4215,
)
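# pb.EntrypointOptions describes how a workload runs: name, command, privileged flag,
# working dir, message-typed log/healthcheck/hook fields, repeated `publish` ports,
# a restart_policy string and a sysctls map (SysctlsEntry).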
_DEPLOYOPTIONS_NETWORKSENTRY = _descriptor.Descriptor(
name='NetworksEntry',
full_name='pb.DeployOptions.NetworksEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.DeployOptions.NetworksEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.DeployOptions.NetworksEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4804,
serialized_end=4851,
)
_DEPLOYOPTIONS_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pb.DeployOptions.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.DeployOptions.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.DeployOptions.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=208,
)
_DEPLOYOPTIONS_NODELABELSENTRY = _descriptor.Descriptor(
name='NodelabelsEntry',
full_name='pb.DeployOptions.NodelabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.DeployOptions.NodelabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.DeployOptions.NodelabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4900,
serialized_end=4949,
)
_DEPLOYOPTIONS_DATAENTRY = _descriptor.Descriptor(
name='DataEntry',
full_name='pb.DeployOptions.DataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.DeployOptions.DataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.DeployOptions.DataEntry.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4951,
serialized_end=4994,
)
_DEPLOYOPTIONS = _descriptor.Descriptor(
name='DeployOptions',
full_name='pb.DeployOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pb.DeployOptions.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entrypoint', full_name='pb.DeployOptions.entrypoint', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podname', full_name='pb.DeployOptions.podname', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.DeployOptions.nodename', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='pb.DeployOptions.image', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extra_args', full_name='pb.DeployOptions.extra_args', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu_quota', full_name='pb.DeployOptions.cpu_quota', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pb.DeployOptions.memory', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='count', full_name='pb.DeployOptions.count', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='env', full_name='pb.DeployOptions.env', index=9,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dns', full_name='pb.DeployOptions.dns', index=10,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extra_hosts', full_name='pb.DeployOptions.extra_hosts', index=11,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='volumes', full_name='pb.DeployOptions.volumes', index=12,
number=13, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='networks', full_name='pb.DeployOptions.networks', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='networkmode', full_name='pb.DeployOptions.networkmode', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='pb.DeployOptions.user', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug', full_name='pb.DeployOptions.debug', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='openStdin', full_name='pb.DeployOptions.openStdin', index=17,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pb.DeployOptions.labels', index=18,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodelabels', full_name='pb.DeployOptions.nodelabels', index=19,
number=20, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploy_method', full_name='pb.DeployOptions.deploy_method', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pb.DeployOptions.data', index=21,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='softlimit', full_name='pb.DeployOptions.softlimit', index=22,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodes_limit', full_name='pb.DeployOptions.nodes_limit', index=23,
number=24, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYOPTIONS_NETWORKSENTRY, _DEPLOYOPTIONS_LABELSENTRY, _DEPLOYOPTIONS_NODELABELSENTRY, _DEPLOYOPTIONS_DATAENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4218,
serialized_end=4994,
)
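# pb.DeployOptions is the main deploy request: app name, a message-typed entrypoint,
# pod/node placement, image, extra_args, resource fields (cpu_quota double, memory
# int64, count int32, nodes_limit int32), repeated env/dns/extra_hosts/volumes,
# debug/openStdin/softlimit flags, a deploy_method string, and the networks/labels/
# nodelabels/data maps defined by the nested *Entry types above.
#
# A minimal construction sketch (hypothetical -- it assumes this generated module has
# already been imported, e.g. as `core_pb2`; the real module name depends on the
# .proto file name):
#
#   opts = core_pb2.DeployOptions(
#       name='myapp', podname='dev', image='myapp:latest',
#       cpu_quota=0.5, memory=512 * 1024 * 1024, count=1,
#       env=['FOO=bar'],
#   )
#   opts.networks['bridge'] = ''      # map<string, string> field
#   opts.labels['owner'] = 'team-a'   # map<string, string> field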
_REPLACEOPTIONS_FILTERLABELSENTRY = _descriptor.Descriptor(
name='FilterLabelsEntry',
full_name='pb.ReplaceOptions.FilterLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.ReplaceOptions.FilterLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.ReplaceOptions.FilterLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5210,
serialized_end=5261,
)
_REPLACEOPTIONS_COPYENTRY = _descriptor.Descriptor(
name='CopyEntry',
full_name='pb.ReplaceOptions.CopyEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.ReplaceOptions.CopyEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.ReplaceOptions.CopyEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5263,
serialized_end=5306,
)
_REPLACEOPTIONS = _descriptor.Descriptor(
name='ReplaceOptions',
full_name='pb.ReplaceOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deployOpt', full_name='pb.ReplaceOptions.deployOpt', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force', full_name='pb.ReplaceOptions.force', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter_labels', full_name='pb.ReplaceOptions.filter_labels', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='copy', full_name='pb.ReplaceOptions.copy', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ids', full_name='pb.ReplaceOptions.ids', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='networkinherit', full_name='pb.ReplaceOptions.networkinherit', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_REPLACEOPTIONS_FILTERLABELSENTRY, _REPLACEOPTIONS_COPYENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4997,
serialized_end=5306,
)
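# pb.ReplaceOptions wraps a message-typed deployOpt with a force flag, a filter_labels
# map for selecting targets, a copy map<string, string>, repeated container ids and a
# networkinherit flag.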
_CACHEIMAGEOPTIONS = _descriptor.Descriptor(
name='CacheImageOptions',
full_name='pb.CacheImageOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='podname', full_name='pb.CacheImageOptions.podname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.CacheImageOptions.nodename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='images', full_name='pb.CacheImageOptions.images', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='pb.CacheImageOptions.step', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5308,
serialized_end=5392,
)
_REMOVEIMAGEOPTIONS = _descriptor.Descriptor(
name='RemoveImageOptions',
full_name='pb.RemoveImageOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='podname', full_name='pb.RemoveImageOptions.podname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.RemoveImageOptions.nodename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='images', full_name='pb.RemoveImageOptions.images', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='pb.RemoveImageOptions.step', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prune', full_name='pb.RemoveImageOptions.prune', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5394,
serialized_end=5494,
)
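# pb.CacheImageOptions and pb.RemoveImageOptions both target images on a pod/node
# (podname, nodename, repeated images, int32 step); RemoveImageOptions adds a prune flag.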
_COPYPATHS = _descriptor.Descriptor(
name='CopyPaths',
full_name='pb.CopyPaths',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='paths', full_name='pb.CopyPaths.paths', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5496,
serialized_end=5522,
)
_COPYOPTIONS_TARGETSENTRY = _descriptor.Descriptor(
name='TargetsEntry',
full_name='pb.CopyOptions.TargetsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.CopyOptions.TargetsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.CopyOptions.TargetsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5586,
serialized_end=5647,
)
_COPYOPTIONS = _descriptor.Descriptor(
name='CopyOptions',
full_name='pb.CopyOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='pb.CopyOptions.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COPYOPTIONS_TARGETSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5524,
serialized_end=5647,
)
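# pb.CopyPaths is just a repeated list of paths; pb.CopyOptions maps a string key to a
# message value (CopyPaths) via the TargetsEntry map-entry type.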
_ERRORDETAIL = _descriptor.Descriptor(
name='ErrorDetail',
full_name='pb.ErrorDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='pb.ErrorDetail.code', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='pb.ErrorDetail.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5649,
serialized_end=5693,
)
_BUILDIMAGEMESSAGE = _descriptor.Descriptor(
name='BuildImageMessage',
full_name='pb.BuildImageMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.BuildImageMessage.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='pb.BuildImageMessage.status', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='progress', full_name='pb.BuildImageMessage.progress', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='pb.BuildImageMessage.error', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stream', full_name='pb.BuildImageMessage.stream', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error_detail', full_name='pb.BuildImageMessage.error_detail', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5696,
serialized_end=5831,
)
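# pb.ErrorDetail carries an int64 code plus a message string. pb.BuildImageMessage
# bundles id/status/progress/error/stream strings with an error_detail sub-message;
# the field names mirror Docker's JSON build-stream output.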
_CREATECONTAINERMESSAGE_CPUENTRY = _descriptor.Descriptor(
name='CpuEntry',
full_name='pb.CreateContainerMessage.CpuEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.CreateContainerMessage.CpuEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.CreateContainerMessage.CpuEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1290,
serialized_end=1332,
)
_CREATECONTAINERMESSAGE_PUBLISHENTRY = _descriptor.Descriptor(
name='PublishEntry',
full_name='pb.CreateContainerMessage.PublishEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pb.CreateContainerMessage.PublishEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pb.CreateContainerMessage.PublishEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1929,
serialized_end=1975,
)
_CREATECONTAINERMESSAGE = _descriptor.Descriptor(
name='CreateContainerMessage',
full_name='pb.CreateContainerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='podname', full_name='pb.CreateContainerMessage.podname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.CreateContainerMessage.nodename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='pb.CreateContainerMessage.id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='pb.CreateContainerMessage.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='pb.CreateContainerMessage.error', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='success', full_name='pb.CreateContainerMessage.success', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpu', full_name='pb.CreateContainerMessage.cpu', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quota', full_name='pb.CreateContainerMessage.quota', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pb.CreateContainerMessage.memory', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='publish', full_name='pb.CreateContainerMessage.publish', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hook', full_name='pb.CreateContainerMessage.hook', index=10,
number=11, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATECONTAINERMESSAGE_CPUENTRY, _CREATECONTAINERMESSAGE_PUBLISHENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5834,
serialized_end=6196,
)
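# pb.CreateContainerMessage reports one created container: podname/nodename placement,
# id and name, error string and success flag, a cpu map<string, int32>, quota (double),
# memory (int64), a publish map<string, string> and raw hook output bytes.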
_REPLACECONTAINERMESSAGE = _descriptor.Descriptor(
name='ReplaceContainerMessage',
full_name='pb.ReplaceContainerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create', full_name='pb.ReplaceContainerMessage.create', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove', full_name='pb.ReplaceContainerMessage.remove', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='pb.ReplaceContainerMessage.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6199,
serialized_end=6327,
)
_RUNANDWAITMESSAGE = _descriptor.Descriptor(
name='RunAndWaitMessage',
full_name='pb.RunAndWaitMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='container_id', full_name='pb.RunAndWaitMessage.container_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pb.RunAndWaitMessage.data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6329,
serialized_end=6384,
)
_CACHEIMAGEMESSAGE = _descriptor.Descriptor(
name='CacheImageMessage',
full_name='pb.CacheImageMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='pb.CacheImageMessage.image', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='success', full_name='pb.CacheImageMessage.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nodename', full_name='pb.CacheImageMessage.nodename', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='pb.CacheImageMessage.message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6386,
serialized_end=6472,
)
_REMOVEIMAGEMESSAGE = _descriptor.Descriptor(
name='RemoveImageMessage',
full_name='pb.RemoveImageMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='pb.RemoveImageMessage.image', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='success', full_name='pb.RemoveImageMessage.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='messages', full_name='pb.RemoveImageMessage.messages', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6474,
serialized_end=6544,
)
_REMOVECONTAINERMESSAGE = _descriptor.Descriptor(
name='RemoveContainerMessage',
full_name='pb.RemoveContainerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.RemoveContainerMessage.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='success', full_name='pb.RemoveContainerMessage.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hook', full_name='pb.RemoveContainerMessage.hook', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6546,
serialized_end=6613,
)
_REALLOCRESOURCEMESSAGE = _descriptor.Descriptor(
name='ReallocResourceMessage',
full_name='pb.ReallocResourceMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.ReallocResourceMessage.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='success', full_name='pb.ReallocResourceMessage.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6615,
serialized_end=6668,
)
_COPYMESSAGE = _descriptor.Descriptor(
name='CopyMessage',
full_name='pb.CopyMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.CopyMessage.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='pb.CopyMessage.status', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='pb.CopyMessage.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='path', full_name='pb.CopyMessage.path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='pb.CopyMessage.error', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pb.CopyMessage.data', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6670,
serialized_end=6768,
)
_RUNANDWAITOPTIONS = _descriptor.Descriptor(
name='RunAndWaitOptions',
full_name='pb.RunAndWaitOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='DeployOptions', full_name='pb.RunAndWaitOptions.DeployOptions', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Cmd', full_name='pb.RunAndWaitOptions.Cmd', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6770,
serialized_end=6844,
)
_CONTROLCONTAINEROPTIONS = _descriptor.Descriptor(
name='ControlContainerOptions',
full_name='pb.ControlContainerOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='pb.ControlContainerOptions.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='pb.ControlContainerOptions.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6846,
serialized_end=6898,
)
_CONTROLCONTAINERMESSAGE = _descriptor.Descriptor(
name='ControlContainerMessage',
full_name='pb.ControlContainerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pb.ControlContainerMessage.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='pb.ControlContainerMessage.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hook', full_name='pb.ControlContainerMessage.hook', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6900,
serialized_end=6966,
)
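# Wire up cross-references between the descriptors defined above: nested map-entry
# helpers get their containing_type, and message-typed fields get their message_type.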
_LISTCONTAINERSOPTIONS_LABELSENTRY.containing_type = _LISTCONTAINERSOPTIONS
_LISTCONTAINERSOPTIONS.fields_by_name['labels'].message_type = _LISTCONTAINERSOPTIONS_LABELSENTRY
_PODS.fields_by_name['pods'].message_type = _POD
_PODRESOURCE_CPUENTRY.containing_type = _PODRESOURCE
_PODRESOURCE_MEMORYENTRY.containing_type = _PODRESOURCE
_PODRESOURCE_DIFFENTRY.containing_type = _PODRESOURCE
_PODRESOURCE_DETAILENTRY.containing_type = _PODRESOURCE
_PODRESOURCE.fields_by_name['cpu'].message_type = _PODRESOURCE_CPUENTRY
_PODRESOURCE.fields_by_name['memory'].message_type = _PODRESOURCE_MEMORYENTRY
_PODRESOURCE.fields_by_name['diff'].message_type = _PODRESOURCE_DIFFENTRY
_PODRESOURCE.fields_by_name['detail'].message_type = _PODRESOURCE_DETAILENTRY
_NETWORKS.fields_by_name['networks'].message_type = _NETWORK
_NODE_CPUENTRY.containing_type = _NODE
_NODE_LABELSENTRY.containing_type = _NODE
_NODE_INITCPUENTRY.containing_type = _NODE
_NODE.fields_by_name['cpu'].message_type = _NODE_CPUENTRY
_NODE.fields_by_name['labels'].message_type = _NODE_LABELSENTRY
_NODE.fields_by_name['init_cpu'].message_type = _NODE_INITCPUENTRY
_NODES.fields_by_name['nodes'].message_type = _NODE
_CONTAINER_CPUENTRY.containing_type = _CONTAINER
_CONTAINER_LABELSENTRY.containing_type = _CONTAINER
_CONTAINER_PUBLISHENTRY.containing_type = _CONTAINER
_CONTAINER.fields_by_name['cpu'].message_type = _CONTAINER_CPUENTRY
_CONTAINER.fields_by_name['labels'].message_type = _CONTAINER_LABELSENTRY
_CONTAINER.fields_by_name['publish'].message_type = _CONTAINER_PUBLISHENTRY
_CONTAINERS.fields_by_name['containers'].message_type = _CONTAINER
_ADDNODEOPTIONS_LABELSENTRY.containing_type = _ADDNODEOPTIONS
_ADDNODEOPTIONS.fields_by_name['labels'].message_type = _ADDNODEOPTIONS_LABELSENTRY
_BUILD_ENVSENTRY.containing_type = _BUILD
_BUILD_ARGSENTRY.containing_type = _BUILD
_BUILD_LABELSENTRY.containing_type = _BUILD
_BUILD_ARTIFACTSENTRY.containing_type = _BUILD
_BUILD_CACHEENTRY.containing_type = _BUILD
_BUILD.fields_by_name['envs'].message_type = _BUILD_ENVSENTRY
_BUILD.fields_by_name['args'].message_type = _BUILD_ARGSENTRY
_BUILD.fields_by_name['labels'].message_type = _BUILD_LABELSENTRY
_BUILD.fields_by_name['artifacts'].message_type = _BUILD_ARTIFACTSENTRY
_BUILD.fields_by_name['cache'].message_type = _BUILD_CACHEENTRY
_BUILDS_BUILDSENTRY.fields_by_name['value'].message_type = _BUILD
_BUILDS_BUILDSENTRY.containing_type = _BUILDS
_BUILDS.fields_by_name['builds'].message_type = _BUILDS_BUILDSENTRY
_BUILDIMAGEOPTIONS.fields_by_name['builds'].message_type = _BUILDS
_LOGOPTIONS_CONFIGENTRY.containing_type = _LOGOPTIONS
_LOGOPTIONS.fields_by_name['config'].message_type = _LOGOPTIONS_CONFIGENTRY
_ENTRYPOINTOPTIONS_SYSCTLSENTRY.containing_type = _ENTRYPOINTOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['log'].message_type = _LOGOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['healthcheck'].message_type = _HEALTHCHECKOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['hook'].message_type = _HOOKOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['sysctls'].message_type = _ENTRYPOINTOPTIONS_SYSCTLSENTRY
_DEPLOYOPTIONS_NETWORKSENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS_LABELSENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS_NODELABELSENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS_DATAENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS.fields_by_name['entrypoint'].message_type = _ENTRYPOINTOPTIONS
_DEPLOYOPTIONS.fields_by_name['networks'].message_type = _DEPLOYOPTIONS_NETWORKSENTRY
_DEPLOYOPTIONS.fields_by_name['labels'].message_type = _DEPLOYOPTIONS_LABELSENTRY
_DEPLOYOPTIONS.fields_by_name['nodelabels'].message_type = _DEPLOYOPTIONS_NODELABELSENTRY
_DEPLOYOPTIONS.fields_by_name['data'].message_type = _DEPLOYOPTIONS_DATAENTRY
_REPLACEOPTIONS_FILTERLABELSENTRY.containing_type = _REPLACEOPTIONS
_REPLACEOPTIONS_COPYENTRY.containing_type = _REPLACEOPTIONS
_REPLACEOPTIONS.fields_by_name['deployOpt'].message_type = _DEPLOYOPTIONS
_REPLACEOPTIONS.fields_by_name['filter_labels'].message_type = _REPLACEOPTIONS_FILTERLABELSENTRY
_REPLACEOPTIONS.fields_by_name['copy'].message_type = _REPLACEOPTIONS_COPYENTRY
_COPYOPTIONS_TARGETSENTRY.fields_by_name['value'].message_type = _COPYPATHS
_COPYOPTIONS_TARGETSENTRY.containing_type = _COPYOPTIONS
_COPYOPTIONS.fields_by_name['targets'].message_type = _COPYOPTIONS_TARGETSENTRY
_BUILDIMAGEMESSAGE.fields_by_name['error_detail'].message_type = _ERRORDETAIL
_CREATECONTAINERMESSAGE_CPUENTRY.containing_type = _CREATECONTAINERMESSAGE
_CREATECONTAINERMESSAGE_PUBLISHENTRY.containing_type = _CREATECONTAINERMESSAGE
_CREATECONTAINERMESSAGE.fields_by_name['cpu'].message_type = _CREATECONTAINERMESSAGE_CPUENTRY
_CREATECONTAINERMESSAGE.fields_by_name['publish'].message_type = _CREATECONTAINERMESSAGE_PUBLISHENTRY
_REPLACECONTAINERMESSAGE.fields_by_name['create'].message_type = _CREATECONTAINERMESSAGE
_REPLACECONTAINERMESSAGE.fields_by_name['remove'].message_type = _REMOVECONTAINERMESSAGE
_RUNANDWAITOPTIONS.fields_by_name['DeployOptions'].message_type = _DEPLOYOPTIONS
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
DESCRIPTOR.message_types_by_name['ListContainersOptions'] = _LISTCONTAINERSOPTIONS
DESCRIPTOR.message_types_by_name['DeployStatusOptions'] = _DEPLOYSTATUSOPTIONS
DESCRIPTOR.message_types_by_name['DeployStatusMessage'] = _DEPLOYSTATUSMESSAGE
DESCRIPTOR.message_types_by_name['Pod'] = _POD
DESCRIPTOR.message_types_by_name['Pods'] = _PODS
DESCRIPTOR.message_types_by_name['PodResource'] = _PODRESOURCE
DESCRIPTOR.message_types_by_name['ListNetworkOptions'] = _LISTNETWORKOPTIONS
DESCRIPTOR.message_types_by_name['Network'] = _NETWORK
DESCRIPTOR.message_types_by_name['Networks'] = _NETWORKS
DESCRIPTOR.message_types_by_name['Node'] = _NODE
DESCRIPTOR.message_types_by_name['Nodes'] = _NODES
DESCRIPTOR.message_types_by_name['NodeAvailable'] = _NODEAVAILABLE
DESCRIPTOR.message_types_by_name['Container'] = _CONTAINER
DESCRIPTOR.message_types_by_name['ContainerDeployedOptions'] = _CONTAINERDEPLOYEDOPTIONS
DESCRIPTOR.message_types_by_name['Containers'] = _CONTAINERS
DESCRIPTOR.message_types_by_name['ContainerID'] = _CONTAINERID
DESCRIPTOR.message_types_by_name['ContainerIDs'] = _CONTAINERIDS
DESCRIPTOR.message_types_by_name['RemoveContainerOptions'] = _REMOVECONTAINEROPTIONS
DESCRIPTOR.message_types_by_name['ReallocOptions'] = _REALLOCOPTIONS
DESCRIPTOR.message_types_by_name['AddPodOptions'] = _ADDPODOPTIONS
DESCRIPTOR.message_types_by_name['RemovePodOptions'] = _REMOVEPODOPTIONS
DESCRIPTOR.message_types_by_name['GetPodOptions'] = _GETPODOPTIONS
DESCRIPTOR.message_types_by_name['AddNodeOptions'] = _ADDNODEOPTIONS
DESCRIPTOR.message_types_by_name['RemoveNodeOptions'] = _REMOVENODEOPTIONS
DESCRIPTOR.message_types_by_name['GetNodeOptions'] = _GETNODEOPTIONS
DESCRIPTOR.message_types_by_name['ListNodesOptions'] = _LISTNODESOPTIONS
DESCRIPTOR.message_types_by_name['Build'] = _BUILD
DESCRIPTOR.message_types_by_name['Builds'] = _BUILDS
DESCRIPTOR.message_types_by_name['BuildImageOptions'] = _BUILDIMAGEOPTIONS
DESCRIPTOR.message_types_by_name['HookOptions'] = _HOOKOPTIONS
DESCRIPTOR.message_types_by_name['HealthCheckOptions'] = _HEALTHCHECKOPTIONS
DESCRIPTOR.message_types_by_name['LogOptions'] = _LOGOPTIONS
DESCRIPTOR.message_types_by_name['EntrypointOptions'] = _ENTRYPOINTOPTIONS
DESCRIPTOR.message_types_by_name['DeployOptions'] = _DEPLOYOPTIONS
DESCRIPTOR.message_types_by_name['ReplaceOptions'] = _REPLACEOPTIONS
DESCRIPTOR.message_types_by_name['CacheImageOptions'] = _CACHEIMAGEOPTIONS
DESCRIPTOR.message_types_by_name['RemoveImageOptions'] = _REMOVEIMAGEOPTIONS
DESCRIPTOR.message_types_by_name['CopyPaths'] = _COPYPATHS
DESCRIPTOR.message_types_by_name['CopyOptions'] = _COPYOPTIONS
DESCRIPTOR.message_types_by_name['ErrorDetail'] = _ERRORDETAIL
DESCRIPTOR.message_types_by_name['BuildImageMessage'] = _BUILDIMAGEMESSAGE
DESCRIPTOR.message_types_by_name['CreateContainerMessage'] = _CREATECONTAINERMESSAGE
DESCRIPTOR.message_types_by_name['ReplaceContainerMessage'] = _REPLACECONTAINERMESSAGE
DESCRIPTOR.message_types_by_name['RunAndWaitMessage'] = _RUNANDWAITMESSAGE
DESCRIPTOR.message_types_by_name['CacheImageMessage'] = _CACHEIMAGEMESSAGE
DESCRIPTOR.message_types_by_name['RemoveImageMessage'] = _REMOVEIMAGEMESSAGE
DESCRIPTOR.message_types_by_name['RemoveContainerMessage'] = _REMOVECONTAINERMESSAGE
DESCRIPTOR.message_types_by_name['ReallocResourceMessage'] = _REALLOCRESOURCEMESSAGE
DESCRIPTOR.message_types_by_name['CopyMessage'] = _COPYMESSAGE
DESCRIPTOR.message_types_by_name['RunAndWaitOptions'] = _RUNANDWAITOPTIONS
DESCRIPTOR.message_types_by_name['ControlContainerOptions'] = _CONTROLCONTAINEROPTIONS
DESCRIPTOR.message_types_by_name['ControlContainerMessage'] = _CONTROLCONTAINERMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
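# Generate the concrete Python message classes from the descriptors via the protobuf
# reflection API and register each class with the symbol database.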
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
DESCRIPTOR = _EMPTY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Empty)
))
_sym_db.RegisterMessage(Empty)
ListContainersOptions = _reflection.GeneratedProtocolMessageType('ListContainersOptions', (_message.Message,), dict(
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _LISTCONTAINERSOPTIONS_LABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ListContainersOptions.LabelsEntry)
))
,
DESCRIPTOR = _LISTCONTAINERSOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ListContainersOptions)
))
_sym_db.RegisterMessage(ListContainersOptions)
_sym_db.RegisterMessage(ListContainersOptions.LabelsEntry)
DeployStatusOptions = _reflection.GeneratedProtocolMessageType('DeployStatusOptions', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYSTATUSOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployStatusOptions)
))
_sym_db.RegisterMessage(DeployStatusOptions)
DeployStatusMessage = _reflection.GeneratedProtocolMessageType('DeployStatusMessage', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYSTATUSMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployStatusMessage)
))
_sym_db.RegisterMessage(DeployStatusMessage)
Pod = _reflection.GeneratedProtocolMessageType('Pod', (_message.Message,), dict(
DESCRIPTOR = _POD,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Pod)
))
_sym_db.RegisterMessage(Pod)
Pods = _reflection.GeneratedProtocolMessageType('Pods', (_message.Message,), dict(
DESCRIPTOR = _PODS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Pods)
))
_sym_db.RegisterMessage(Pods)
PodResource = _reflection.GeneratedProtocolMessageType('PodResource', (_message.Message,), dict(
CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict(
DESCRIPTOR = _PODRESOURCE_CPUENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.PodResource.CpuEntry)
))
,
MemoryEntry = _reflection.GeneratedProtocolMessageType('MemoryEntry', (_message.Message,), dict(
DESCRIPTOR = _PODRESOURCE_MEMORYENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.PodResource.MemoryEntry)
))
,
DiffEntry = _reflection.GeneratedProtocolMessageType('DiffEntry', (_message.Message,), dict(
DESCRIPTOR = _PODRESOURCE_DIFFENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.PodResource.DiffEntry)
))
,
DetailEntry = _reflection.GeneratedProtocolMessageType('DetailEntry', (_message.Message,), dict(
DESCRIPTOR = _PODRESOURCE_DETAILENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.PodResource.DetailEntry)
))
,
DESCRIPTOR = _PODRESOURCE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.PodResource)
))
_sym_db.RegisterMessage(PodResource)
_sym_db.RegisterMessage(PodResource.CpuEntry)
_sym_db.RegisterMessage(PodResource.MemoryEntry)
_sym_db.RegisterMessage(PodResource.DiffEntry)
_sym_db.RegisterMessage(PodResource.DetailEntry)
ListNetworkOptions = _reflection.GeneratedProtocolMessageType('ListNetworkOptions', (_message.Message,), dict(
DESCRIPTOR = _LISTNETWORKOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ListNetworkOptions)
))
_sym_db.RegisterMessage(ListNetworkOptions)
Network = _reflection.GeneratedProtocolMessageType('Network', (_message.Message,), dict(
DESCRIPTOR = _NETWORK,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Network)
))
_sym_db.RegisterMessage(Network)
Networks = _reflection.GeneratedProtocolMessageType('Networks', (_message.Message,), dict(
DESCRIPTOR = _NETWORKS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Networks)
))
_sym_db.RegisterMessage(Networks)
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict(
CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict(
DESCRIPTOR = _NODE_CPUENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Node.CpuEntry)
))
,
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _NODE_LABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Node.LabelsEntry)
))
,
InitCpuEntry = _reflection.GeneratedProtocolMessageType('InitCpuEntry', (_message.Message,), dict(
DESCRIPTOR = _NODE_INITCPUENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Node.InitCpuEntry)
))
,
DESCRIPTOR = _NODE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Node)
))
_sym_db.RegisterMessage(Node)
_sym_db.RegisterMessage(Node.CpuEntry)
_sym_db.RegisterMessage(Node.LabelsEntry)
_sym_db.RegisterMessage(Node.InitCpuEntry)
Nodes = _reflection.GeneratedProtocolMessageType('Nodes', (_message.Message,), dict(
DESCRIPTOR = _NODES,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Nodes)
))
_sym_db.RegisterMessage(Nodes)
NodeAvailable = _reflection.GeneratedProtocolMessageType('NodeAvailable', (_message.Message,), dict(
DESCRIPTOR = _NODEAVAILABLE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.NodeAvailable)
))
_sym_db.RegisterMessage(NodeAvailable)
Container = _reflection.GeneratedProtocolMessageType('Container', (_message.Message,), dict(
CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict(
DESCRIPTOR = _CONTAINER_CPUENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Container.CpuEntry)
))
,
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _CONTAINER_LABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Container.LabelsEntry)
))
,
PublishEntry = _reflection.GeneratedProtocolMessageType('PublishEntry', (_message.Message,), dict(
DESCRIPTOR = _CONTAINER_PUBLISHENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Container.PublishEntry)
))
,
DESCRIPTOR = _CONTAINER,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Container)
))
_sym_db.RegisterMessage(Container)
_sym_db.RegisterMessage(Container.CpuEntry)
_sym_db.RegisterMessage(Container.LabelsEntry)
_sym_db.RegisterMessage(Container.PublishEntry)
ContainerDeployedOptions = _reflection.GeneratedProtocolMessageType('ContainerDeployedOptions', (_message.Message,), dict(
DESCRIPTOR = _CONTAINERDEPLOYEDOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ContainerDeployedOptions)
))
_sym_db.RegisterMessage(ContainerDeployedOptions)
Containers = _reflection.GeneratedProtocolMessageType('Containers', (_message.Message,), dict(
DESCRIPTOR = _CONTAINERS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Containers)
))
_sym_db.RegisterMessage(Containers)
ContainerID = _reflection.GeneratedProtocolMessageType('ContainerID', (_message.Message,), dict(
DESCRIPTOR = _CONTAINERID,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ContainerID)
))
_sym_db.RegisterMessage(ContainerID)
ContainerIDs = _reflection.GeneratedProtocolMessageType('ContainerIDs', (_message.Message,), dict(
DESCRIPTOR = _CONTAINERIDS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ContainerIDs)
))
_sym_db.RegisterMessage(ContainerIDs)
RemoveContainerOptions = _reflection.GeneratedProtocolMessageType('RemoveContainerOptions', (_message.Message,), dict(
DESCRIPTOR = _REMOVECONTAINEROPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RemoveContainerOptions)
))
_sym_db.RegisterMessage(RemoveContainerOptions)
ReallocOptions = _reflection.GeneratedProtocolMessageType('ReallocOptions', (_message.Message,), dict(
DESCRIPTOR = _REALLOCOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ReallocOptions)
))
_sym_db.RegisterMessage(ReallocOptions)
AddPodOptions = _reflection.GeneratedProtocolMessageType('AddPodOptions', (_message.Message,), dict(
DESCRIPTOR = _ADDPODOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.AddPodOptions)
))
_sym_db.RegisterMessage(AddPodOptions)
RemovePodOptions = _reflection.GeneratedProtocolMessageType('RemovePodOptions', (_message.Message,), dict(
DESCRIPTOR = _REMOVEPODOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RemovePodOptions)
))
_sym_db.RegisterMessage(RemovePodOptions)
GetPodOptions = _reflection.GeneratedProtocolMessageType('GetPodOptions', (_message.Message,), dict(
DESCRIPTOR = _GETPODOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.GetPodOptions)
))
_sym_db.RegisterMessage(GetPodOptions)
AddNodeOptions = _reflection.GeneratedProtocolMessageType('AddNodeOptions', (_message.Message,), dict(
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _ADDNODEOPTIONS_LABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.AddNodeOptions.LabelsEntry)
))
,
DESCRIPTOR = _ADDNODEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.AddNodeOptions)
))
_sym_db.RegisterMessage(AddNodeOptions)
_sym_db.RegisterMessage(AddNodeOptions.LabelsEntry)
RemoveNodeOptions = _reflection.GeneratedProtocolMessageType('RemoveNodeOptions', (_message.Message,), dict(
DESCRIPTOR = _REMOVENODEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RemoveNodeOptions)
))
_sym_db.RegisterMessage(RemoveNodeOptions)
GetNodeOptions = _reflection.GeneratedProtocolMessageType('GetNodeOptions', (_message.Message,), dict(
DESCRIPTOR = _GETNODEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.GetNodeOptions)
))
_sym_db.RegisterMessage(GetNodeOptions)
ListNodesOptions = _reflection.GeneratedProtocolMessageType('ListNodesOptions', (_message.Message,), dict(
DESCRIPTOR = _LISTNODESOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ListNodesOptions)
))
_sym_db.RegisterMessage(ListNodesOptions)
Build = _reflection.GeneratedProtocolMessageType('Build', (_message.Message,), dict(
EnvsEntry = _reflection.GeneratedProtocolMessageType('EnvsEntry', (_message.Message,), dict(
DESCRIPTOR = _BUILD_ENVSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Build.EnvsEntry)
))
,
ArgsEntry = _reflection.GeneratedProtocolMessageType('ArgsEntry', (_message.Message,), dict(
DESCRIPTOR = _BUILD_ARGSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Build.ArgsEntry)
))
,
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _BUILD_LABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Build.LabelsEntry)
))
,
ArtifactsEntry = _reflection.GeneratedProtocolMessageType('ArtifactsEntry', (_message.Message,), dict(
DESCRIPTOR = _BUILD_ARTIFACTSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Build.ArtifactsEntry)
))
,
CacheEntry = _reflection.GeneratedProtocolMessageType('CacheEntry', (_message.Message,), dict(
DESCRIPTOR = _BUILD_CACHEENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Build.CacheEntry)
))
,
DESCRIPTOR = _BUILD,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Build)
))
_sym_db.RegisterMessage(Build)
_sym_db.RegisterMessage(Build.EnvsEntry)
_sym_db.RegisterMessage(Build.ArgsEntry)
_sym_db.RegisterMessage(Build.LabelsEntry)
_sym_db.RegisterMessage(Build.ArtifactsEntry)
_sym_db.RegisterMessage(Build.CacheEntry)
Builds = _reflection.GeneratedProtocolMessageType('Builds', (_message.Message,), dict(
BuildsEntry = _reflection.GeneratedProtocolMessageType('BuildsEntry', (_message.Message,), dict(
DESCRIPTOR = _BUILDS_BUILDSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Builds.BuildsEntry)
))
,
DESCRIPTOR = _BUILDS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.Builds)
))
_sym_db.RegisterMessage(Builds)
_sym_db.RegisterMessage(Builds.BuildsEntry)
BuildImageOptions = _reflection.GeneratedProtocolMessageType('BuildImageOptions', (_message.Message,), dict(
DESCRIPTOR = _BUILDIMAGEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.BuildImageOptions)
))
_sym_db.RegisterMessage(BuildImageOptions)
HookOptions = _reflection.GeneratedProtocolMessageType('HookOptions', (_message.Message,), dict(
DESCRIPTOR = _HOOKOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.HookOptions)
))
_sym_db.RegisterMessage(HookOptions)
HealthCheckOptions = _reflection.GeneratedProtocolMessageType('HealthCheckOptions', (_message.Message,), dict(
DESCRIPTOR = _HEALTHCHECKOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.HealthCheckOptions)
))
_sym_db.RegisterMessage(HealthCheckOptions)
LogOptions = _reflection.GeneratedProtocolMessageType('LogOptions', (_message.Message,), dict(
ConfigEntry = _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _LOGOPTIONS_CONFIGENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.LogOptions.ConfigEntry)
))
,
DESCRIPTOR = _LOGOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.LogOptions)
))
_sym_db.RegisterMessage(LogOptions)
_sym_db.RegisterMessage(LogOptions.ConfigEntry)
EntrypointOptions = _reflection.GeneratedProtocolMessageType('EntrypointOptions', (_message.Message,), dict(
SysctlsEntry = _reflection.GeneratedProtocolMessageType('SysctlsEntry', (_message.Message,), dict(
DESCRIPTOR = _ENTRYPOINTOPTIONS_SYSCTLSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.EntrypointOptions.SysctlsEntry)
))
,
DESCRIPTOR = _ENTRYPOINTOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.EntrypointOptions)
))
_sym_db.RegisterMessage(EntrypointOptions)
_sym_db.RegisterMessage(EntrypointOptions.SysctlsEntry)
DeployOptions = _reflection.GeneratedProtocolMessageType('DeployOptions', (_message.Message,), dict(
NetworksEntry = _reflection.GeneratedProtocolMessageType('NetworksEntry', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYOPTIONS_NETWORKSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployOptions.NetworksEntry)
))
,
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYOPTIONS_LABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployOptions.LabelsEntry)
))
,
NodelabelsEntry = _reflection.GeneratedProtocolMessageType('NodelabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYOPTIONS_NODELABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployOptions.NodelabelsEntry)
))
,
DataEntry = _reflection.GeneratedProtocolMessageType('DataEntry', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYOPTIONS_DATAENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployOptions.DataEntry)
))
,
DESCRIPTOR = _DEPLOYOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.DeployOptions)
))
_sym_db.RegisterMessage(DeployOptions)
_sym_db.RegisterMessage(DeployOptions.NetworksEntry)
_sym_db.RegisterMessage(DeployOptions.LabelsEntry)
_sym_db.RegisterMessage(DeployOptions.NodelabelsEntry)
_sym_db.RegisterMessage(DeployOptions.DataEntry)
ReplaceOptions = _reflection.GeneratedProtocolMessageType('ReplaceOptions', (_message.Message,), dict(
FilterLabelsEntry = _reflection.GeneratedProtocolMessageType('FilterLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _REPLACEOPTIONS_FILTERLABELSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ReplaceOptions.FilterLabelsEntry)
))
,
CopyEntry = _reflection.GeneratedProtocolMessageType('CopyEntry', (_message.Message,), dict(
DESCRIPTOR = _REPLACEOPTIONS_COPYENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ReplaceOptions.CopyEntry)
))
,
DESCRIPTOR = _REPLACEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ReplaceOptions)
))
_sym_db.RegisterMessage(ReplaceOptions)
_sym_db.RegisterMessage(ReplaceOptions.FilterLabelsEntry)
_sym_db.RegisterMessage(ReplaceOptions.CopyEntry)
CacheImageOptions = _reflection.GeneratedProtocolMessageType('CacheImageOptions', (_message.Message,), dict(
DESCRIPTOR = _CACHEIMAGEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CacheImageOptions)
))
_sym_db.RegisterMessage(CacheImageOptions)
RemoveImageOptions = _reflection.GeneratedProtocolMessageType('RemoveImageOptions', (_message.Message,), dict(
DESCRIPTOR = _REMOVEIMAGEOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RemoveImageOptions)
))
_sym_db.RegisterMessage(RemoveImageOptions)
CopyPaths = _reflection.GeneratedProtocolMessageType('CopyPaths', (_message.Message,), dict(
DESCRIPTOR = _COPYPATHS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CopyPaths)
))
_sym_db.RegisterMessage(CopyPaths)
CopyOptions = _reflection.GeneratedProtocolMessageType('CopyOptions', (_message.Message,), dict(
TargetsEntry = _reflection.GeneratedProtocolMessageType('TargetsEntry', (_message.Message,), dict(
DESCRIPTOR = _COPYOPTIONS_TARGETSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CopyOptions.TargetsEntry)
))
,
DESCRIPTOR = _COPYOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CopyOptions)
))
_sym_db.RegisterMessage(CopyOptions)
_sym_db.RegisterMessage(CopyOptions.TargetsEntry)
ErrorDetail = _reflection.GeneratedProtocolMessageType('ErrorDetail', (_message.Message,), dict(
DESCRIPTOR = _ERRORDETAIL,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ErrorDetail)
))
_sym_db.RegisterMessage(ErrorDetail)
BuildImageMessage = _reflection.GeneratedProtocolMessageType('BuildImageMessage', (_message.Message,), dict(
DESCRIPTOR = _BUILDIMAGEMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.BuildImageMessage)
))
_sym_db.RegisterMessage(BuildImageMessage)
CreateContainerMessage = _reflection.GeneratedProtocolMessageType('CreateContainerMessage', (_message.Message,), dict(
CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict(
DESCRIPTOR = _CREATECONTAINERMESSAGE_CPUENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CreateContainerMessage.CpuEntry)
))
,
PublishEntry = _reflection.GeneratedProtocolMessageType('PublishEntry', (_message.Message,), dict(
DESCRIPTOR = _CREATECONTAINERMESSAGE_PUBLISHENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CreateContainerMessage.PublishEntry)
))
,
DESCRIPTOR = _CREATECONTAINERMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CreateContainerMessage)
))
_sym_db.RegisterMessage(CreateContainerMessage)
_sym_db.RegisterMessage(CreateContainerMessage.CpuEntry)
_sym_db.RegisterMessage(CreateContainerMessage.PublishEntry)
ReplaceContainerMessage = _reflection.GeneratedProtocolMessageType('ReplaceContainerMessage', (_message.Message,), dict(
DESCRIPTOR = _REPLACECONTAINERMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ReplaceContainerMessage)
))
_sym_db.RegisterMessage(ReplaceContainerMessage)
RunAndWaitMessage = _reflection.GeneratedProtocolMessageType('RunAndWaitMessage', (_message.Message,), dict(
DESCRIPTOR = _RUNANDWAITMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RunAndWaitMessage)
))
_sym_db.RegisterMessage(RunAndWaitMessage)
CacheImageMessage = _reflection.GeneratedProtocolMessageType('CacheImageMessage', (_message.Message,), dict(
DESCRIPTOR = _CACHEIMAGEMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CacheImageMessage)
))
_sym_db.RegisterMessage(CacheImageMessage)
RemoveImageMessage = _reflection.GeneratedProtocolMessageType('RemoveImageMessage', (_message.Message,), dict(
DESCRIPTOR = _REMOVEIMAGEMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RemoveImageMessage)
))
_sym_db.RegisterMessage(RemoveImageMessage)
RemoveContainerMessage = _reflection.GeneratedProtocolMessageType('RemoveContainerMessage', (_message.Message,), dict(
DESCRIPTOR = _REMOVECONTAINERMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RemoveContainerMessage)
))
_sym_db.RegisterMessage(RemoveContainerMessage)
ReallocResourceMessage = _reflection.GeneratedProtocolMessageType('ReallocResourceMessage', (_message.Message,), dict(
DESCRIPTOR = _REALLOCRESOURCEMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ReallocResourceMessage)
))
_sym_db.RegisterMessage(ReallocResourceMessage)
CopyMessage = _reflection.GeneratedProtocolMessageType('CopyMessage', (_message.Message,), dict(
DESCRIPTOR = _COPYMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.CopyMessage)
))
_sym_db.RegisterMessage(CopyMessage)
RunAndWaitOptions = _reflection.GeneratedProtocolMessageType('RunAndWaitOptions', (_message.Message,), dict(
DESCRIPTOR = _RUNANDWAITOPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.RunAndWaitOptions)
))
_sym_db.RegisterMessage(RunAndWaitOptions)
ControlContainerOptions = _reflection.GeneratedProtocolMessageType('ControlContainerOptions', (_message.Message,), dict(
DESCRIPTOR = _CONTROLCONTAINEROPTIONS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ControlContainerOptions)
))
_sym_db.RegisterMessage(ControlContainerOptions)
ControlContainerMessage = _reflection.GeneratedProtocolMessageType('ControlContainerMessage', (_message.Message,), dict(
DESCRIPTOR = _CONTROLCONTAINERMESSAGE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:pb.ControlContainerMessage)
))
_sym_db.RegisterMessage(ControlContainerMessage)
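# The serialized MessageOptions payload '8\001' sets map_entry=true, marking each
# nested *Entry helper message below as the synthetic entry type of a proto3 map field.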
_LISTCONTAINERSOPTIONS_LABELSENTRY.has_options = True
_LISTCONTAINERSOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PODRESOURCE_CPUENTRY.has_options = True
_PODRESOURCE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PODRESOURCE_MEMORYENTRY.has_options = True
_PODRESOURCE_MEMORYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PODRESOURCE_DIFFENTRY.has_options = True
_PODRESOURCE_DIFFENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PODRESOURCE_DETAILENTRY.has_options = True
_PODRESOURCE_DETAILENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_NODE_CPUENTRY.has_options = True
_NODE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_NODE_LABELSENTRY.has_options = True
_NODE_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_NODE_INITCPUENTRY.has_options = True
_NODE_INITCPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CONTAINER_CPUENTRY.has_options = True
_CONTAINER_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CONTAINER_LABELSENTRY.has_options = True
_CONTAINER_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CONTAINER_PUBLISHENTRY.has_options = True
_CONTAINER_PUBLISHENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_ADDNODEOPTIONS_LABELSENTRY.has_options = True
_ADDNODEOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_BUILD_ENVSENTRY.has_options = True
_BUILD_ENVSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_BUILD_ARGSENTRY.has_options = True
_BUILD_ARGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_BUILD_LABELSENTRY.has_options = True
_BUILD_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_BUILD_ARTIFACTSENTRY.has_options = True
_BUILD_ARTIFACTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_BUILD_CACHEENTRY.has_options = True
_BUILD_CACHEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_BUILDS_BUILDSENTRY.has_options = True
_BUILDS_BUILDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_LOGOPTIONS_CONFIGENTRY.has_options = True
_LOGOPTIONS_CONFIGENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_ENTRYPOINTOPTIONS_SYSCTLSENTRY.has_options = True
_ENTRYPOINTOPTIONS_SYSCTLSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_DEPLOYOPTIONS_NETWORKSENTRY.has_options = True
_DEPLOYOPTIONS_NETWORKSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_DEPLOYOPTIONS_LABELSENTRY.has_options = True
_DEPLOYOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_DEPLOYOPTIONS_NODELABELSENTRY.has_options = True
_DEPLOYOPTIONS_NODELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_DEPLOYOPTIONS_DATAENTRY.has_options = True
_DEPLOYOPTIONS_DATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_REPLACEOPTIONS_FILTERLABELSENTRY.has_options = True
_REPLACEOPTIONS_FILTERLABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_REPLACEOPTIONS_COPYENTRY.has_options = True
_REPLACEOPTIONS_COPYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_COPYOPTIONS_TARGETSENTRY.has_options = True
_COPYOPTIONS_TARGETSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CREATECONTAINERMESSAGE_CPUENTRY.has_options = True
_CREATECONTAINERMESSAGE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CREATECONTAINERMESSAGE_PUBLISHENTRY.has_options = True
_CREATECONTAINERMESSAGE_PUBLISHENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
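# Service descriptor for the CoreRPC gRPC service: each MethodDescriptor below binds
# an RPC name to its request (input_type) and response (output_type) message descriptors.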
_CORERPC = _descriptor.ServiceDescriptor(
name='CoreRPC',
full_name='pb.CoreRPC',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=6969,
serialized_end=8580,
methods=[
_descriptor.MethodDescriptor(
name='ListPods',
full_name='pb.CoreRPC.ListPods',
index=0,
containing_service=None,
input_type=_EMPTY,
output_type=_PODS,
options=None,
),
_descriptor.MethodDescriptor(
name='AddPod',
full_name='pb.CoreRPC.AddPod',
index=1,
containing_service=None,
input_type=_ADDPODOPTIONS,
output_type=_POD,
options=None,
),
_descriptor.MethodDescriptor(
name='RemovePod',
full_name='pb.CoreRPC.RemovePod',
index=2,
containing_service=None,
input_type=_REMOVEPODOPTIONS,
output_type=_EMPTY,
options=None,
),
_descriptor.MethodDescriptor(
name='GetPod',
full_name='pb.CoreRPC.GetPod',
index=3,
containing_service=None,
input_type=_GETPODOPTIONS,
output_type=_POD,
options=None,
),
_descriptor.MethodDescriptor(
name='GetPodResource',
full_name='pb.CoreRPC.GetPodResource',
index=4,
containing_service=None,
input_type=_GETPODOPTIONS,
output_type=_PODRESOURCE,
options=None,
),
_descriptor.MethodDescriptor(
name='AddNode',
full_name='pb.CoreRPC.AddNode',
index=5,
containing_service=None,
input_type=_ADDNODEOPTIONS,
output_type=_NODE,
options=None,
),
_descriptor.MethodDescriptor(
name='RemoveNode',
full_name='pb.CoreRPC.RemoveNode',
index=6,
containing_service=None,
input_type=_REMOVENODEOPTIONS,
output_type=_POD,
options=None,
),
_descriptor.MethodDescriptor(
name='SetNodeAvailable',
full_name='pb.CoreRPC.SetNodeAvailable',
index=7,
containing_service=None,
input_type=_NODEAVAILABLE,
output_type=_NODE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetNode',
full_name='pb.CoreRPC.GetNode',
index=8,
containing_service=None,
input_type=_GETNODEOPTIONS,
output_type=_NODE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetContainer',
full_name='pb.CoreRPC.GetContainer',
index=9,
containing_service=None,
input_type=_CONTAINERID,
output_type=_CONTAINER,
options=None,
),
_descriptor.MethodDescriptor(
name='GetContainers',
full_name='pb.CoreRPC.GetContainers',
index=10,
containing_service=None,
input_type=_CONTAINERIDS,
output_type=_CONTAINERS,
options=None,
),
_descriptor.MethodDescriptor(
name='GetNodeByName',
full_name='pb.CoreRPC.GetNodeByName',
index=11,
containing_service=None,
input_type=_GETNODEOPTIONS,
output_type=_NODE,
options=None,
),
_descriptor.MethodDescriptor(
name='ListPodNodes',
full_name='pb.CoreRPC.ListPodNodes',
index=12,
containing_service=None,
input_type=_LISTNODESOPTIONS,
output_type=_NODES,
options=None,
),
_descriptor.MethodDescriptor(
name='ListNetworks',
full_name='pb.CoreRPC.ListNetworks',
index=13,
containing_service=None,
input_type=_LISTNETWORKOPTIONS,
output_type=_NETWORKS,
options=None,
),
_descriptor.MethodDescriptor(
name='ListContainers',
full_name='pb.CoreRPC.ListContainers',
index=14,
containing_service=None,
input_type=_LISTCONTAINERSOPTIONS,
output_type=_CONTAINERS,
options=None,
),
_descriptor.MethodDescriptor(
name='ListNodeContainers',
full_name='pb.CoreRPC.ListNodeContainers',
index=15,
containing_service=None,
input_type=_GETNODEOPTIONS,
output_type=_CONTAINERS,
options=None,
),
_descriptor.MethodDescriptor(
name='ContainerDeployed',
full_name='pb.CoreRPC.ContainerDeployed',
index=16,
containing_service=None,
input_type=_CONTAINERDEPLOYEDOPTIONS,
output_type=_EMPTY,
options=None,
),
_descriptor.MethodDescriptor(
name='Copy',
full_name='pb.CoreRPC.Copy',
index=17,
containing_service=None,
input_type=_COPYOPTIONS,
output_type=_COPYMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='BuildImage',
full_name='pb.CoreRPC.BuildImage',
index=18,
containing_service=None,
input_type=_BUILDIMAGEOPTIONS,
output_type=_BUILDIMAGEMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='CacheImage',
full_name='pb.CoreRPC.CacheImage',
index=19,
containing_service=None,
input_type=_CACHEIMAGEOPTIONS,
output_type=_CACHEIMAGEMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='RemoveImage',
full_name='pb.CoreRPC.RemoveImage',
index=20,
containing_service=None,
input_type=_REMOVEIMAGEOPTIONS,
output_type=_REMOVEIMAGEMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='DeployStatus',
full_name='pb.CoreRPC.DeployStatus',
index=21,
containing_service=None,
input_type=_DEPLOYSTATUSOPTIONS,
output_type=_DEPLOYSTATUSMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='RunAndWait',
full_name='pb.CoreRPC.RunAndWait',
index=22,
containing_service=None,
input_type=_RUNANDWAITOPTIONS,
output_type=_RUNANDWAITMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='CreateContainer',
full_name='pb.CoreRPC.CreateContainer',
index=23,
containing_service=None,
input_type=_DEPLOYOPTIONS,
output_type=_CREATECONTAINERMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='ReplaceContainer',
full_name='pb.CoreRPC.ReplaceContainer',
index=24,
containing_service=None,
input_type=_REPLACEOPTIONS,
output_type=_REPLACECONTAINERMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='RemoveContainer',
full_name='pb.CoreRPC.RemoveContainer',
index=25,
containing_service=None,
input_type=_REMOVECONTAINEROPTIONS,
output_type=_REMOVECONTAINERMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='ControlContainer',
full_name='pb.CoreRPC.ControlContainer',
index=26,
containing_service=None,
input_type=_CONTROLCONTAINEROPTIONS,
output_type=_CONTROLCONTAINERMESSAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='ReallocResource',
full_name='pb.CoreRPC.ReallocResource',
index=27,
containing_service=None,
input_type=_REALLOCOPTIONS,
output_type=_REALLOCRESOURCEMESSAGE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_CORERPC)
DESCRIPTOR.services_by_name['CoreRPC'] = _CORERPC
# @@protoc_insertion_point(module_scope)
| 38.875126 | 16,249 | 0.731339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40,959 | 0.212549 |
b9a524c2d76717a70aa199aeb8c04e4579e1a276 | 2,217 | py | Python | src/models/text_node.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | ["MIT"] | null | null | null | src/models/text_node.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | ["MIT"] | null | null | null | src/models/text_node.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | ["MIT"] | null | null | null |
from neomodel import StructuredNode, StringProperty, JSONProperty, \
Relationship, IntegerProperty
import numpy as np
import re
from models.text_relation import TextRelation
__all__ = ['TextNode']
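# neomodel OGM node for a single text fragment: it stores the fragment text and
# analysis results (alg_results) and connects to other fragments via TextRelation edges.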
class TextNode(StructuredNode):
order_id = IntegerProperty(required=True, unique_index=True)
label = StringProperty(required=True)
text = StringProperty(required=True)
alg_results = JSONProperty()
link = Relationship('TextNode', 'ALG', model=TextRelation)
def short(self):
res = ''.join([word.strip() + ' '
for word in re.split(r'[\n ]', self.text, 5)[:5]])
return res
def describe(self):
return f"""
<h1>Fragment: {self.order_id} </h1>
<table border="1" width=100%>
<caption>
Node information
</caption>
<tr>
<th>Number of characters</th>
<td>{self.character_num()}</td>
</tr>
<tr>
<th>Number of words</th>
<td>{self.words_num()}</td>
</tr>
<tr>
<th>Number of sentences</th>
<td>{self.sentences_num()}</td>
</tr>
<tr>
<th>Number of relations</th>
<td>{len(self.link)}</td>
</tr>
</table>
"""
def preview(self, frag_num=0):
leading = 3
if frag_num > 0:
leading = int(np.floor(np.log10(frag_num))) + 1
        return f"{str(self.order_id).zfill(leading)}: " \
            + f"[{self.label}] {self.short()}..."
def words_num(self):
return len(self.text.split())
def character_num(self):
return len(self.text)
def sentences_num(self):
return len([s for s in self.text.split('.') if len(s) > 2])
| 31.671429 | 73 | 0.488498 | 2,088 | 0.903114 | 0 | 0 | 0 | 0 | 0 | 0 | 1,105 | 0.477941 |
b9a529f9f36fb2cce0a38f16148b6bc2117ab033 | 2,655 | py | Python | tests/test_bishop_generate.py | otaviocarvalho/chess-negamax | 21f1066611e581dac3257d3f46c71ca2b09b5964 | ["MIT"] | 6 | 2015-04-04T15:58:29.000Z | 2019-04-07T11:45:02.000Z | tests/test_bishop_generate.py | otaviocarvalho/chess-negamax | 21f1066611e581dac3257d3f46c71ca2b09b5964 | ["MIT"] | 1 | 2015-04-27T19:02:06.000Z | 2015-04-27T19:02:06.000Z | tests/test_bishop_generate.py | otaviocarvalho/chess-negamax | 21f1066611e581dac3257d3f46c71ca2b09b5964 | ["MIT"] | 3 | 2015-10-04T00:22:17.000Z | 2019-04-07T11:44:56.000Z |
import unittest
from .helpers import StubBoard, StubPiece, C, WHITE, BLACK
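# Tests for Bishop.generate(): each case checks that a diagonal ray stops before a
# friendly piece and includes the square of the first enemy piece it can capture.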
class TestBishopGenerate(unittest.TestCase):
def get_bishop(self, board, team, position):
from chess.models import Bishop
return Bishop(board, team, position)
def compare_list(self, expected, results):
compared = []
for e in expected:
for r in results:
if e[0] == r[0] and e[1] == r[1]:
compared.append(True)
break
else:
compared.append(False)
return compared
def test_generate_topright(self):
board = StubBoard()
board[C('h7')] = StubPiece(board, BLACK, C('h7'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('f5'), C('g6'), C('h7')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
def test_generate_topleft(self):
board = StubBoard()
board[C('c6')] = StubPiece(board, WHITE, C('c6'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('d5')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
expected = [C('c6')]
correct = self.compare_list(expected, results)
self.assertFalse(any(correct))
def test_generate_bottomleft(self):
board = StubBoard()
board[C('c2')] = StubPiece(board, BLACK, C('c2'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('d3'), C('c2')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
expected = [C('b1')]
correct = self.compare_list(expected, results)
self.assertFalse(any(correct))
def test_generate_bottomright(self):
board = StubBoard()
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('f3'), C('g2'), C('h1')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
def test_generate_amount(self):
board = StubBoard()
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
self.assertEqual(len(results), 13)
board = StubBoard()
board[C('c6')] = StubPiece(board, WHITE, C('c6'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
self.assertEqual(len(results), 10)
if __name__ == '__main__':
    unittest.main()
| 30.872093 | 58 | 0.581921 | 2,530 | 0.952919 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.041431 |